This commit is contained in:
Phil Race 2018-08-27 10:54:58 -07:00
commit d0a60f53ee
939 changed files with 20194 additions and 13276 deletions

View File

@ -500,8 +500,11 @@ e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18
ea900a7dc7d77dee30865c60eabd87fc24b1037c jdk-11+24
331888ea4a788df801b1edf8836646cd25fc758b jdk-11+25
945ba9278a272a5477ffb1b3ea1b04174fed8036 jdk-11+26
9d7d74c6f2cbe522e39fa22dc557fdd3f79b32ad jdk-11+27
69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
990db216e7199b2ba9989d8fa20b657e0ca7d969 jdk-12+3
499b873761d8e8a1cc4aa649daf04cbe98cbce77 jdk-12+4
f8696e0ab9b795030429fc3374ec03e378fd9ed7 jdk-12+5
7939b3c4e4088bf4f70ec5bbd8030393b653372f jdk-12+6
ef57958c7c511162da8d9a75f0b977f0f7ac464e jdk-12+7
492b366f8e5784cc4927c2c98f9b8a3f16c067eb jdk-12+8

View File

@ -72,6 +72,7 @@
<li><a href="#specifying-the-target-platform">Specifying the Target Platform</a></li>
<li><a href="#toolchain-considerations">Toolchain Considerations</a></li>
<li><a href="#native-libraries">Native Libraries</a></li>
<li><a href="#creating-and-using-sysroots-with-qemu-deboostrap">Creating And Using Sysroots With qemu-debootstrap</a></li>
<li><a href="#building-for-armaarch64">Building for ARM/aarch64</a></li>
<li><a href="#verifying-the-build">Verifying the Build</a></li>
</ul></li>
@ -634,6 +635,72 @@ cp: cannot stat `arm-linux-gnueabihf/libSM.so&#39;: No such file or directory
cp: cannot stat `arm-linux-gnueabihf/libXt.so&#39;: No such file or directory</code></pre></li>
<li><p>If the X11 libraries are not properly detected by <code>configure</code>, you can point them out by <code>--with-x</code>.</p></li>
</ul>
<h3 id="creating-and-using-sysroots-with-qemu-deboostrap">Creating And Using Sysroots With qemu-debootstrap</h3>
<p>Fortunately, you can create sysroots for foreign architectures with tools provided by your OS. On Debian/Ubuntu systems, one could use <code>qemu-debootstrap</code> to create the <em>target</em> system chroot, which would have the native libraries and headers specific to that <em>target</em> system. After that, we can use the cross-compiler on the <em>build</em> system, pointing into the chroot to get the build dependencies right. This allows building for foreign architectures with native compilation speed.</p>
<p>For example, cross-compiling to AArch64 from x86_64 could be done like this:</p>
<ul>
<li><p>Install cross-compiler on the <em>build</em> system:</p>
<pre><code>apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu</code></pre></li>
<li><p>Create chroot on the <em>build</em> system, configuring it for <em>target</em> system:</p>
<pre><code>sudo qemu-debootstrap --arch=arm64 --verbose \
--include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
--resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/</code></pre></li>
<li><p>Configure and build with newly created chroot as sysroot/toolchain-path:</p>
<pre><code>CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/
make images
ls build/linux-aarch64-normal-server-release/</code></pre></li>
</ul>
<p>The build does not create new files in that chroot, so it can be reused for multiple builds without additional cleanup.</p>
<p>Architectures that are known to successfully cross-compile like this are:</p>
<table>
<thead>
<tr class="header">
<th style="text-align: left;">Target</th>
<th style="text-align: left;"><code>CC</code></th>
<th style="text-align: left;"><code>CXX</code></th>
<th><code>--arch=...</code></th>
<th><code>--openjdk-target=...</code></th>
</tr>
</thead>
<tbody>
<tr class="odd">
<td style="text-align: left;">x86</td>
<td style="text-align: left;">default</td>
<td style="text-align: left;">default</td>
<td>i386</td>
<td>i386-linux-gnu</td>
</tr>
<tr class="even">
<td style="text-align: left;">armhf</td>
<td style="text-align: left;">gcc-arm-linux-gnueabihf</td>
<td style="text-align: left;">g++-arm-linux-gnueabihf</td>
<td>armhf</td>
<td>arm-linux-gnueabihf</td>
</tr>
<tr class="odd">
<td style="text-align: left;">aarch64</td>
<td style="text-align: left;">gcc-aarch64-linux-gnu</td>
<td style="text-align: left;">g++-aarch64-linux-gnu</td>
<td>arm64</td>
<td>aarch64-linux-gnu</td>
</tr>
<tr class="even">
<td style="text-align: left;">ppc64el</td>
<td style="text-align: left;">gcc-powerpc64le-linux-gnu</td>
<td style="text-align: left;">g++-powerpc64le-linux-gnu</td>
<td>ppc64el</td>
<td>powerpc64le-linux-gnu</td>
</tr>
<tr class="odd">
<td style="text-align: left;">s390x</td>
<td style="text-align: left;">gcc-s390x-linux-gnu</td>
<td style="text-align: left;">g++-s390x-linux-gnu</td>
<td>s390x</td>
<td>s390x-linux-gnu</td>
</tr>
</tbody>
</table>
<p>Additional architectures might be supported by Debian/Ubuntu Ports.</p>
<h3 id="building-for-armaarch64">Building for ARM/aarch64</h3>
<p>A common cross-compilation target is the ARM CPU. When building for ARM, it is useful to set the ABI profile. A number of pre-defined ABI profiles are available using <code>--with-abi-profile</code>: arm-vfp-sflt, arm-vfp-hflt, arm-sflt, armv5-vfp-sflt, armv6-vfp-hflt. Note that soft-float ABIs are no longer properly supported by the JDK.</p>
<p>The JDK contains two different ports for the aarch64 platform, one is the original aarch64 port from the <a href="http://openjdk.java.net/projects/aarch64-port">AArch64 Port Project</a> and one is a 64-bit version of the Oracle contributed ARM port. When targeting aarch64, by default the original aarch64 port is used. To select the Oracle ARM 64 port, use <code>--with-cpu-port=arm64</code>. Also set the corresponding value (<code>aarch64</code> or <code>arm64</code>) to --with-abi-profile, to ensure a consistent build.</p>

View File

@ -1018,6 +1018,51 @@ Note that X11 is needed even if you only want to build a headless JDK.
* If the X11 libraries are not properly detected by `configure`, you can
point them out by `--with-x`.
### Creating And Using Sysroots With qemu-debootstrap
Fortunately, you can create sysroots for foreign architectures with tools
provided by your OS. On Debian/Ubuntu systems, one could use `qemu-debootstrap` to
create the *target* system chroot, which would have the native libraries and headers
specific to that *target* system. After that, we can use the cross-compiler on the *build*
system, pointing into chroot to get the build dependencies right. This allows building
for foreign architectures with native compilation speed.
For example, cross-compiling to AArch64 from x86_64 could be done like this:
* Install cross-compiler on the *build* system:
```
apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
```
* Create chroot on the *build* system, configuring it for *target* system:
```
sudo qemu-debootstrap --arch=arm64 --verbose \
--include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
--resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/
```
* Configure and build with newly created chroot as sysroot/toolchain-path:
```
CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/
make images
ls build/linux-aarch64-normal-server-release/
```
The build does not create new files in that chroot, so it can be reused for multiple builds
without additional cleanup.
Architectures that are known to successfully cross-compile like this are:
Target `CC` `CXX` `--arch=...` `--openjdk-target=...`
------------ ------------------------- --------------------------- ------------ ----------------------
x86 default default i386 i386-linux-gnu
armhf gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf armhf arm-linux-gnueabihf
aarch64 gcc-aarch64-linux-gnu g++-aarch64-linux-gnu arm64 aarch64-linux-gnu
ppc64el gcc-powerpc64le-linux-gnu g++-powerpc64le-linux-gnu ppc64el powerpc64le-linux-gnu
s390x gcc-s390x-linux-gnu g++-s390x-linux-gnu s390x s390x-linux-gnu
Additional architectures might be supported by Debian/Ubuntu Ports.
### Building for ARM/aarch64
A common cross-compilation target is the ARM CPU. When building for ARM, it is

View File

@ -511,6 +511,10 @@ jdk.aot_ADD_JAVAC_FLAGS += -parameters -XDstringConcat=inline \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.sparc=jdk.internal.vm.compiler,jdk.aot \
#
jdk.aot_EXCLUDES += \
jdk.tools.jaotc.test
#
################################################################################
sun.charsets_COPY += .dat

View File

@ -94,7 +94,7 @@ define SetupTestFilesCompilationBody
CFLAGS := $$($1_CFLAGS) $$($1_CFLAGS_$$(name)), \
LDFLAGS := $$($1_LDFLAGS) $$($1_LDFLAGS_$$(name)), \
LIBS := $$($1_LIBS_$$(name)), \
OPTIMIZATION := LOW, \
OPTIMIZATION := $$(if $$($1_OPTIMIZATION_$$(name)),$$($1_OPTIMIZATION_$$(name)),LOW), \
COPY_DEBUG_SYMBOLS := false, \
STRIP_SYMBOLS := false, \
)) \

View File

@ -124,7 +124,7 @@ $(GENSRC_DIR)/module-info.java.extra: $(GENSRC_DIR)/_gensrc_proc_done
($(CD) $(GENSRC_DIR)/META-INF/providers && \
p=""; \
impl=""; \
for i in $$($(LS) | $(SORT)); do \
for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
if test x$$p != x$$c; then \
if test x$$p != x; then \

View File

@ -431,14 +431,12 @@ public class GenModuleInfoSource {
}
uses.put(name, statement);
break;
/* Disable this check until jdk.internal.vm.compiler generated file is fixed.
case "provides":
if (provides.containsKey(name)) {
throw parser.newError("multiple " + keyword + " " + name);
}
provides.put(name, statement);
break;
*/
}
String lookAhead = lookAhead(parser);
if (lookAhead.equals(statement.qualifier)) {

View File

@ -230,7 +230,11 @@ public class ModuleInfoExtraTest {
new String[] {
" uses s;",
" uses s;"
}, ".*, line .*, multiple uses s.*"
}, ".*, line .*, multiple uses s.*",
new String[] {
" provides s with impl1;",
" provides s with impl2, impl3;"
}, ".*, line .*, multiple provides s.*"
);
void errorCases() {

View File

@ -139,6 +139,15 @@ NSK_AOD_INCLUDES := \
-I$(VM_TESTBASE_DIR)/nsk/share/native \
-I$(VM_TESTBASE_DIR)/nsk/share/jni
NO_FRAMEPOINTER_CFLAGS :=
ifeq ($(OPENJDK_TARGET_OS),linux)
NO_FRAMEPOINTER_CFLAGS := -fomit-frame-pointer
endif
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libNoFramePointer := $(NO_FRAMEPOINTER_CFLAGS)
# Optimization -O3 needed, HIGH == -O3
BUILD_HOTSPOT_JTREG_LIBRARIES_OPTIMIZATION_libNoFramePointer := HIGH
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES)
BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libThreadController := $(NSK_MONITORING_INCLUDES)

View File

@ -43,7 +43,8 @@ suite = {
"jdk.vm.ci.services" : {
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"javaCompliance" : "9",
"javaCompliance" : "9+",
"checkstyleVersion" : "8.8",
"workingSets" : "API,JVMCI",
},
@ -53,7 +54,7 @@ suite = {
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -61,7 +62,7 @@ suite = {
"subDir" : "../jdk.internal.vm.ci/share/classes",
"sourceDirs" : ["src"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -70,7 +71,7 @@ suite = {
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.meta"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -85,7 +86,7 @@ suite = {
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -97,7 +98,7 @@ suite = {
"jdk.vm.ci.services",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -110,7 +111,7 @@ suite = {
"jdk.vm.ci.runtime",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -121,7 +122,7 @@ suite = {
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI,AArch64",
},
@ -130,7 +131,7 @@ suite = {
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI,AMD64",
},
@ -139,7 +140,7 @@ suite = {
"sourceDirs" : ["src"],
"dependencies" : ["jdk.vm.ci.code"],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI,SPARC",
},
@ -156,7 +157,7 @@ suite = {
"jdk.internal.org.objectweb.asm",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI",
},
@ -168,7 +169,7 @@ suite = {
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "API,JVMCI",
},
@ -180,7 +181,7 @@ suite = {
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI,HotSpot,AArch64",
},
@ -192,7 +193,7 @@ suite = {
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI,HotSpot,AMD64",
},
@ -204,7 +205,7 @@ suite = {
"jdk.vm.ci.hotspot",
],
"checkstyle" : "jdk.vm.ci.services",
"javaCompliance" : "9",
"javaCompliance" : "9+",
"workingSets" : "JVMCI,HotSpot,SPARC",
},

File diff suppressed because it is too large Load Diff

View File

@ -2167,6 +2167,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
__ resolve(ACCESS_READ, src);
__ resolve(ACCESS_WRITE, dst);
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@ -2510,6 +2513,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
scratch = op->scratch_opr()->as_register();
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
__ resolve(ACCESS_READ | ACCESS_WRITE, obj);
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
if (op->info() != NULL) {

View File

@ -941,6 +941,10 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
index = tmp;
}
if (is_updateBytes) {
base_op = access_resolve(ACCESS_READ, base_op);
}
if (offset) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, LIR_OprFact::intConst(offset), tmp);
@ -1019,6 +1023,10 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
index = tmp;
}
if (is_updateBytes) {
base_op = access_resolve(ACCESS_READ, base_op);
}
if (offset) {
LIR_Opr tmp = new_pointer_register();
__ add(base_op, LIR_OprFact::intConst(offset), tmp);

View File

@ -3038,6 +3038,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register length = op->length()->as_register();
Register tmp = op->tmp()->as_register();
__ resolve(ACCESS_READ, src);
__ resolve(ACCESS_WRITE, dst);
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@ -3476,6 +3479,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
scratch = op->scratch_opr()->as_register();
}
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
__ resolve(ACCESS_READ | ACCESS_WRITE, obj);
// add debug info for NullPointerException only if one is possible
int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
if (op->info() != NULL) {

View File

@ -997,6 +997,10 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
}
#endif
if (is_updateBytes) {
base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
}
LIR_Address* a = new LIR_Address(base_op,
index,
offset,
@ -1054,7 +1058,7 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
constant_aOffset = result_aOffset->as_jlong();
result_aOffset = LIR_OprFact::illegalOpr;
}
LIR_Opr result_a = a.result();
LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());
long constant_bOffset = 0;
LIR_Opr result_bOffset = bOffset.result();
@ -1062,7 +1066,7 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
constant_bOffset = result_bOffset->as_jlong();
result_bOffset = LIR_OprFact::illegalOpr;
}
LIR_Opr result_b = b.result();
LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());
#ifndef _LP64
result_a = new_register(T_INT);

View File

@ -23,10 +23,12 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/macros.hpp"
#ifdef COMPILER1

View File

@ -3123,6 +3123,16 @@ void MacroAssembler::store_double(Address dst) {
}
}
// Spill the full 512-bit contents of a ZMM register onto the stack
// (64 bytes), without disturbing the CPU flags.
void MacroAssembler::push_zmm(XMMRegister reg) {
lea(rsp, Address(rsp, -64)); // Use lea to not affect flags
evmovdqul(Address(rsp, 0), reg, Assembler::AVX_512bit);
}
// Reload a 512-bit ZMM register from the stack slot created by
// push_zmm() and release the 64 bytes, again without touching flags.
void MacroAssembler::pop_zmm(XMMRegister reg) {
evmovdqul(reg, Address(rsp, 0), Assembler::AVX_512bit);
lea(rsp, Address(rsp, 64)); // Use lea to not affect flags
}
void MacroAssembler::fremr(Register tmp) {
save_rax(tmp);
{ Label L;
@ -3848,33 +3858,25 @@ void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
} else if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::pcmpeqb(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pcmpeqb(xmm0, src);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::pcmpeqb(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::pcmpeqb(xmm1, xmm0);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -3886,33 +3888,25 @@ void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
} else if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::pcmpeqw(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pcmpeqw(xmm0, src);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::pcmpeqw(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::pcmpeqw(xmm1, xmm0);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -3921,13 +3915,11 @@ void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
if (dst_enc < 16) {
Assembler::pcmpestri(dst, src, imm8);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pcmpestri(xmm0, src, imm8);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
@ -3937,33 +3929,25 @@ void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::pcmpestri(dst, src, imm8);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pcmpestri(xmm0, src, imm8);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::pcmpestri(dst, xmm0, imm8);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::pcmpestri(xmm1, xmm0, imm8);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -3975,33 +3959,25 @@ void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
} else if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::pmovzxbw(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pmovzxbw(xmm0, src);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::pmovzxbw(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::pmovzxbw(xmm1, xmm0);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -4012,13 +3988,11 @@ void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) {
} else if (dst_enc < 16) {
Assembler::pmovzxbw(dst, src);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pmovzxbw(xmm0, src);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
@ -4027,12 +4001,10 @@ void MacroAssembler::pmovmskb(Register dst, XMMRegister src) {
if (src_enc < 16) {
Assembler::pmovmskb(dst, src);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::pmovmskb(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
@ -4042,31 +4014,23 @@ void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) {
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::ptest(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::ptest(xmm0, src);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::ptest(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::ptest(xmm1, xmm0);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -4221,13 +4185,11 @@ void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, A
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
vandps(xmm0, xmm0, negate_field, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
}
@ -4258,13 +4220,11 @@ void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, A
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
vandpd(xmm0, xmm0, negate_field, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
}
@ -4294,16 +4254,14 @@ void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, i
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
} else {
// worst case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpaddb(xmm0, xmm0, xmm1, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4353,16 +4311,14 @@ void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, i
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
} else {
// worst case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpaddw(xmm0, xmm0, xmm1, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4404,33 +4360,25 @@ void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src) {
} else if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::vpbroadcastw(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpbroadcastw(xmm0, src);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::vpbroadcastw(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::vpbroadcastw(xmm1, xmm0);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -4442,33 +4390,25 @@ void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src,
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::vpcmpeqb(dst, nds, src, vector_len);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpcmpeqb(xmm0, xmm0, src, vector_len);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::vpcmpeqb(dst, dst, xmm0, vector_len);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::vpcmpeqb(xmm1, xmm1, xmm0, vector_len);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -4480,33 +4420,25 @@ void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src,
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::vpcmpeqw(dst, nds, src, vector_len);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpcmpeqw(xmm0, xmm0, src, vector_len);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::vpcmpeqw(dst, dst, xmm0, vector_len);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::vpcmpeqw(xmm1, xmm1, xmm0, vector_len);
movdqu(dst, xmm1);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -4517,13 +4449,11 @@ void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
} else if (dst_enc < 16) {
Assembler::vpmovzxbw(dst, src, vector_len);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpmovzxbw(xmm0, src, vector_len);
movdqu(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
@ -4532,12 +4462,10 @@ void MacroAssembler::vpmovmskb(Register dst, XMMRegister src) {
if (src_enc < 16) {
Assembler::vpmovmskb(dst, src);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::vpmovmskb(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
@ -4566,16 +4494,14 @@ void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src,
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
} else {
// worse case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpmullw(xmm0, xmm0, xmm1, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4625,16 +4551,14 @@ void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, i
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
} else {
// worse case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpsubb(xmm0, xmm0, xmm1, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4684,16 +4608,14 @@ void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, i
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
} else {
// worse case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vpsubw(xmm0, xmm0, xmm1, vector_len);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4751,8 +4673,7 @@ void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift,
evmovdqul(dst, nds, Assembler::AVX_512bit);
} else {
// worse case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, shift, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@ -4760,8 +4681,7 @@ void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift,
evmovdqul(xmm1, dst, Assembler::AVX_512bit);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4819,8 +4739,7 @@ void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift,
evmovdqul(dst, nds, Assembler::AVX_512bit);
} else {
// worse case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, shift, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@ -4828,8 +4747,7 @@ void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift,
evmovdqul(xmm1, dst, Assembler::AVX_512bit);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4887,8 +4805,7 @@ void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift,
evmovdqul(dst, nds, Assembler::AVX_512bit);
} else {
// worse case scenario, all regs are in the upper bank
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm1);
evmovdqul(nds, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, shift, Assembler::AVX_512bit);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@ -4896,8 +4813,7 @@ void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift,
evmovdqul(xmm1, dst, Assembler::AVX_512bit);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, nds, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
}
}
@ -4928,31 +4844,23 @@ void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) {
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::vptest(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::vptest(xmm0, src);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::vptest(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
movdqu(xmm0, src);
movdqu(xmm1, dst);
Assembler::vptest(xmm1, xmm0);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
@ -4966,45 +4874,35 @@ void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) {
if (dst_enc < 16) {
Assembler::punpcklbw(dst, src);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::punpcklbw(xmm0, xmm0);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
} else {
if ((src_enc < 16) && (dst_enc < 16)) {
Assembler::punpcklbw(dst, src);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::punpcklbw(xmm0, src);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::punpcklbw(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
Assembler::punpcklbw(xmm0, xmm1);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
} else {
@ -5020,12 +4918,10 @@ void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) {
if (dst_enc < 16) {
Assembler::pshufd(dst, src, mode);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
Assembler::pshufd(xmm0, src, mode);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
}
}
@ -5040,45 +4936,35 @@ void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
if (dst_enc < 16) {
Assembler::pshuflw(dst, src, mode);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pshuflw(xmm0, xmm0, mode);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
}
} else {
if ((src_enc < 16) && (dst_enc < 16)) {
Assembler::pshuflw(dst, src, mode);
} else if (src_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
Assembler::pshuflw(xmm0, src, mode);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else if (dst_enc < 16) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
evmovdqul(xmm0, src, Assembler::AVX_512bit);
Assembler::pshuflw(dst, xmm0, mode);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
push_zmm(xmm0);
push_zmm(xmm1);
evmovdqul(xmm0, dst, Assembler::AVX_512bit);
evmovdqul(xmm1, src, Assembler::AVX_512bit);
Assembler::pshuflw(xmm0, xmm1, mode);
evmovdqul(dst, xmm0, Assembler::AVX_512bit);
evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm1);
pop_zmm(xmm0);
}
}
} else {
@ -5166,13 +5052,11 @@ void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral
if (VM_Version::supports_avx512novl() &&
(nds_upper_bank || dst_upper_bank)) {
if (dst_upper_bank) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
movflt(xmm0, nds);
vxorps(xmm0, xmm0, src, Assembler::AVX_128bit);
movflt(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
movflt(dst, nds);
vxorps(dst, dst, src, Assembler::AVX_128bit);
@ -5190,13 +5074,11 @@ void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral
if (VM_Version::supports_avx512novl() &&
(nds_upper_bank || dst_upper_bank)) {
if (dst_upper_bank) {
subptr(rsp, 64);
evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
push_zmm(xmm0);
movdbl(xmm0, nds);
vxorpd(xmm0, xmm0, src, Assembler::AVX_128bit);
movdbl(dst, xmm0);
evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
addptr(rsp, 64);
pop_zmm(xmm0);
} else {
movdbl(dst, nds);
vxorpd(dst, dst, src, Assembler::AVX_128bit);
@ -10567,7 +10449,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
XMMRegister tmp1Reg, XMMRegister tmp2Reg,
XMMRegister tmp3Reg, XMMRegister tmp4Reg,
Register tmp5, Register result) {
Label copy_chars_loop, return_length, return_zero, done, below_threshold;
Label copy_chars_loop, return_length, return_zero, done;
// rsi: src
// rdi: dst
@ -10590,13 +10472,12 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
set_vector_masking(); // opening of the stub context for programming mask registers
Label copy_32_loop, copy_loop_tail, restore_k1_return_zero;
Label copy_32_loop, copy_loop_tail, restore_k1_return_zero, below_threshold;
// alignement
Label post_alignement;
// alignment
Label post_alignment;
// if length of the string is less than 16, handle it in an old fashioned
// way
// if length of the string is less than 16, handle it in an old fashioned way
testl(len, -32);
jcc(Assembler::zero, below_threshold);
@ -10609,7 +10490,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
kmovql(k3, k1);
testl(len, -64);
jcc(Assembler::zero, post_alignement);
jcc(Assembler::zero, post_alignment);
movl(tmp5, dst);
andl(tmp5, (32 - 1));
@ -10618,7 +10499,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
// bail out when there is nothing to be done
testl(tmp5, 0xFFFFFFFF);
jcc(Assembler::zero, post_alignement);
jcc(Assembler::zero, post_alignment);
// ~(~0 << len), where len is the # of remaining elements to process
movl(result, 0xFFFFFFFF);
@ -10638,8 +10519,8 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
addptr(dst, tmp5);
subl(len, tmp5);
bind(post_alignement);
// end of alignement
bind(post_alignment);
// end of alignment
movl(tmp5, len);
andl(tmp5, (32 - 1)); // tail count (in chars)
@ -10694,11 +10575,12 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le
jmp(return_zero);
clear_vector_masking(); // closing of the stub context for programming mask registers
}
if (UseSSE42Intrinsics) {
Label copy_32_loop, copy_16, copy_tail;
bind(below_threshold);
}
if (UseSSE42Intrinsics) {
Label copy_32_loop, copy_16, copy_tail;
movl(result, len);
@ -10812,8 +10694,7 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
Label copy_32_loop, copy_tail;
Register tmp3_aliased = len;
// if length of the string is less than 16, handle it in an old fashioned
// way
// if length of the string is less than 16, handle it in an old fashioned way
testl(len, -16);
jcc(Assembler::zero, below_threshold);
@ -10927,7 +10808,10 @@ void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len
addptr(dst, 8);
bind(copy_bytes);
} else {
bind(below_threshold);
}
testl(len, len);
jccb(Assembler::zero, done);
lea(src, Address(src, len, Address::times_1));

View File

@ -482,6 +482,10 @@ class MacroAssembler: public Assembler {
// from register xmm0. Otherwise, the value is stored from the FPU stack.
void store_double(Address dst);
// Save/restore ZMM (512bit) register on stack.
void push_zmm(XMMRegister reg);
void pop_zmm(XMMRegister reg);
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void push_fTOS();

View File

@ -317,18 +317,6 @@ reg_class ptr_rsp_reg(RSP, RSP_H);
// Singleton class for TLS pointer
reg_class ptr_r15_reg(R15, R15_H);
// The registers which can be used for
// a thread local safepoint poll
// * R12 is reserved for heap base
// * R13 cannot be encoded for addressing without an offset byte
// * R15 is reserved for the JavaThread
reg_class ptr_rex_reg(R8, R8_H,
R9, R9_H,
R10, R10_H,
R11, R11_H,
R14, R14_H);
// Class for all long registers (excluding RSP)
reg_class long_reg_with_rbp(RAX, RAX_H,
RDX, RDX_H,
@ -3557,16 +3545,6 @@ operand r15_RegP()
interface(REG_INTER);
%}
operand rex_RegP()
%{
constraint(ALLOC_IN_RC(ptr_rex_reg));
match(RegP);
match(rRegP);
format %{ %}
interface(REG_INTER);
%}
operand rRegL()
%{
constraint(ALLOC_IN_RC(long_reg));
@ -12360,7 +12338,7 @@ instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
ins_pipe(ialu_reg_mem);
%}
instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll)
instruct safePoint_poll_tls(rFlagsReg cr, rRegP poll)
%{
predicate(SafepointMechanism::uses_thread_local_poll());
match(SafePoint poll);
@ -12369,13 +12347,12 @@ instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll)
format %{ "testl rax, [$poll]\t"
"# Safepoint: poll for GC" %}
ins_cost(125);
size(3); /* setting an explicit size will cause debug builds to assert if size is incorrect */
size(4); /* setting an explicit size will cause debug builds to assert if size is incorrect */
ins_encode %{
__ relocate(relocInfo::poll_type);
address pre_pc = __ pc();
__ testl(rax, Address($poll$$Register, 0));
address post_pc = __ pc();
guarantee(pre_pc[0] == 0x41 && pre_pc[1] == 0x85, "must emit #rex test-ax [reg]");
assert(nativeInstruction_at(pre_pc)->is_safepoint_poll(), "must emit test %%eax [reg]");
%}
ins_pipe(ialu_reg_mem);
%}

View File

@ -5793,11 +5793,21 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
core_pattern[ret] = '\0';
}
// Replace the %p in the core pattern with the process id. NOTE: we do this
// only if the pattern doesn't start with "|", and we support only one %p in
// the pattern.
char *pid_pos = strstr(core_pattern, "%p");
const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : ""; // skip over the "%p"
int written;
if (core_pattern[0] == '/') {
written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
if (pid_pos != NULL) {
*pid_pos = '\0';
written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
current_process_id(), tail);
} else {
written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
}
} else {
char cwd[PATH_MAX];
@ -5810,6 +5820,10 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
written = jio_snprintf(buffer, bufferSize,
"\"%s\" (or dumping to %s/core.%d)",
&core_pattern[1], p, current_process_id());
} else if (pid_pos != NULL) {
*pid_pos = '\0';
written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
current_process_id(), tail);
} else {
written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
}

View File

@ -1006,7 +1006,7 @@ bool AOTCodeHeap::reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKla
InstanceKlass* dyno = InstanceKlass::cast(dyno_klass);
if (!dyno->is_anonymous()) {
if (!dyno->is_unsafe_anonymous()) {
if (_klasses_got[dyno_data->_got_index] != dyno) {
// compile-time class different from runtime class, fail and deoptimize
sweep_dependent_methods(holder_data);

View File

@ -362,7 +362,7 @@ void AOTCompiledMethod::log_identity(xmlStream* log) const {
log->print(" aot='%2d'", _heap->dso_id());
}
void AOTCompiledMethod::log_state_change() const {
void AOTCompiledMethod::log_state_change(oop cause) const {
if (LogCompilation) {
ResourceMark m;
if (xtty != NULL) {

View File

@ -193,7 +193,7 @@ private:
virtual int comp_level() const { return CompLevel_aot; }
virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
virtual void log_identity(xmlStream* stream) const;
virtual void log_state_change() const;
virtual void log_state_change(oop cause = NULL) const;
virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; });
virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
virtual bool make_not_used() { return make_not_entrant_helper(not_used); }

View File

@ -42,7 +42,7 @@ GrowableArray<AOTLib*>* AOTLoader::_libraries = new(ResourceObj::C_HEAP, mtCode)
#define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator<AOTLib*> lib = libraries()->begin(); lib != libraries()->end(); ++lib)
void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
if (ik->is_anonymous()) {
if (ik->is_unsafe_anonymous()) {
// don't even bother
return;
}
@ -54,7 +54,7 @@ void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
}
uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
if (ik->is_anonymous()) {
if (ik->is_unsafe_anonymous()) {
// don't even bother
return 0;
}

View File

@ -34,9 +34,5 @@ const DecoratorSet C1_NEEDS_PATCHING = DECORATOR_LAST << 1;
// Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
// needs to be masked.
const DecoratorSet C1_MASK_BOOLEAN = DECORATOR_LAST << 2;
// The C1_WRITE_ACCESS decorator is used to mark writing accesses.
const DecoratorSet C1_WRITE_ACCESS = DECORATOR_LAST << 3;
// The C1_READ_ACCESS decorator is used to mark reading accesses.
const DecoratorSet C1_READ_ACCESS = DECORATOR_LAST << 4;
#endif // SHARE_VM_C1_C1_DECORATORS_HPP

View File

@ -1844,8 +1844,8 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// invoke-special-super
if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer()) {
ciInstanceKlass* sender_klass =
calling_klass->is_anonymous() ? calling_klass->host_klass() :
calling_klass;
calling_klass->is_unsafe_anonymous() ? calling_klass->unsafe_anonymous_host() :
calling_klass;
if (sender_klass->is_interface()) {
int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
Value receiver = state()->stack_at(index);

View File

@ -112,6 +112,9 @@ LIR_Assembler::LIR_Assembler(Compilation* c):
LIR_Assembler::~LIR_Assembler() {
// The unwind handler label may be unnbound if this destructor is invoked because of a bail-out.
// Reset it here to avoid an assertion.
_unwind_handler_entry.reset();
}

View File

@ -71,11 +71,7 @@ class LIR_Assembler: public CompilationResourceObj {
void record_non_safepoint_debug_info();
// unified bailout support
void bailout(const char* msg) {
// reset the label in case it hits assertion in destructor.
_unwind_handler_entry.reset();
compilation()->bailout(msg);
}
void bailout(const char* msg) const { compilation()->bailout(msg); }
bool bailed_out() const { return compilation()->bailed_out(); }
// code emission patterns and accessors

View File

@ -1285,9 +1285,10 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
// FIXME T_ADDRESS should actually be T_METADATA but it can't because the
// meaning of these two is mixed up (see JDK-8026837).
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
__ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result);
__ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
// mirror = ((OopHandle)mirror)->resolve();
__ move_wide(new LIR_Address(result, T_OBJECT), result);
access_load(IN_NATIVE, T_OBJECT,
LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
}
// java.lang.Class::isPrimitive()
@ -1614,7 +1615,7 @@ void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr result,
CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
decorators |= C1_READ_ACCESS;
decorators |= ACCESS_READ;
LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
if (access.is_raw()) {
_barrier_set->BarrierSetC1::load_at(access, result);
@ -1623,10 +1624,22 @@ void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
}
}
void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
LIR_Opr addr, LIR_Opr result) {
decorators |= ACCESS_READ;
LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
access.set_resolved_addr(addr);
if (access.is_raw()) {
_barrier_set->BarrierSetC1::load(access, result);
} else {
_barrier_set->load(access, result);
}
}
void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIR_Opr offset, LIR_Opr value,
CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
decorators |= C1_WRITE_ACCESS;
decorators |= ACCESS_WRITE;
LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
if (access.is_raw()) {
_barrier_set->BarrierSetC1::store_at(access, value);
@ -1637,9 +1650,9 @@ void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
decorators |= ACCESS_READ;
decorators |= ACCESS_WRITE;
// Atomic operations are SEQ_CST by default
decorators |= C1_READ_ACCESS;
decorators |= C1_WRITE_ACCESS;
decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
LIRAccess access(this, decorators, base, offset, type);
if (access.is_raw()) {
@ -1651,9 +1664,9 @@ LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicTyp
LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value) {
decorators |= ACCESS_READ;
decorators |= ACCESS_WRITE;
// Atomic operations are SEQ_CST by default
decorators |= C1_READ_ACCESS;
decorators |= C1_WRITE_ACCESS;
decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
LIRAccess access(this, decorators, base, offset, type);
if (access.is_raw()) {
@ -1665,9 +1678,9 @@ LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType t
LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value) {
decorators |= ACCESS_READ;
decorators |= ACCESS_WRITE;
// Atomic operations are SEQ_CST by default
decorators |= C1_READ_ACCESS;
decorators |= C1_WRITE_ACCESS;
decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
LIRAccess access(this, decorators, base, offset, type);
if (access.is_raw()) {
@ -1677,6 +1690,15 @@ LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType ty
}
}
LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
// Use stronger ACCESS_WRITE|ACCESS_READ by default.
if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
decorators |= ACCESS_READ | ACCESS_WRITE;
}
return _barrier_set->resolve(this, decorators, obj);
}
void LIRGenerator::do_LoadField(LoadField* x) {
bool needs_patching = x->needs_patching();
bool is_volatile = x->field()->is_volatile();
@ -1754,11 +1776,12 @@ void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
if (GenerateRangeChecks) {
CodeEmitInfo* info = state_for(x);
CodeStub* stub = new RangeCheckStub(info, index.result());
LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
if (index.result()->is_constant()) {
cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
__ branch(lir_cond_belowEqual, T_INT, stub);
} else {
cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
java_nio_Buffer::limit_offset(), T_INT, info);
__ branch(lir_cond_aboveEqual, T_INT, stub);
}

View File

@ -288,6 +288,9 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIRItem& base, LIR_Opr offset, LIR_Opr result,
CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL);
void access_load(DecoratorSet decorators, BasicType type,
LIR_Opr addr, LIR_Opr result);
LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
@ -297,6 +300,8 @@ class LIRGenerator: public InstructionVisitor, public BlockClosure {
LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
LIRItem& base, LIRItem& offset, LIRItem& value);
LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj);
// These need to guarantee JMM volatile semantics are preserved on each platform
// and requires one implementation per architecture.
LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);

View File

@ -55,8 +55,9 @@
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"

View File

@ -31,7 +31,7 @@
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
// ciField
@ -222,9 +222,9 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke"))
return true;
// Trust VM anonymous classes. They are private API (sun.misc.Unsafe) and can't be serialized,
// so there is no hacking of finals going on with them.
if (holder->is_anonymous())
// Trust VM unsafe anonymous classes. They are private API (jdk.internal.misc.Unsafe)
// and can't be serialized, so there is no hacking of finals going on with them.
if (holder->is_unsafe_anonymous())
return true;
// Trust final fields in all boxed classes
if (holder->is_box_klass())

View File

@ -33,7 +33,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
@ -62,7 +62,7 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
_nonstatic_field_size = ik->nonstatic_field_size();
_has_nonstatic_fields = ik->has_nonstatic_fields();
_has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
_is_anonymous = ik->is_anonymous();
_is_unsafe_anonymous = ik->is_unsafe_anonymous();
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
_has_injected_fields = -1;
_implementor = NULL; // we will fill these lazily
@ -73,13 +73,13 @@ ciInstanceKlass::ciInstanceKlass(Klass* k) :
// InstanceKlass are created for both weak and strong metadata. Ensuring this metadata
// alive covers the cases where there are weak roots without performance cost.
oop holder = ik->holder_phantom();
if (ik->is_anonymous()) {
if (ik->is_unsafe_anonymous()) {
// Though ciInstanceKlass records class loader oop, it's not enough to keep
// VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
// It is enough to record a ciObject, since cached elements are never removed
// VM unsafe anonymous classes alive (loader == NULL). Klass holder should
// be used instead. It is enough to record a ciObject, since cached elements are never removed
// during ciObjectFactory lifetime. ciObjectFactory itself is created for
// every compilation and lives for the whole duration of the compilation.
assert(holder != NULL, "holder of anonymous class is the mirror which is never null");
assert(holder != NULL, "holder of unsafe anonymous class is the mirror which is never null");
(void)CURRENT_ENV->get_object(holder);
}
@ -122,7 +122,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
_has_nonstatic_fields = false;
_nonstatic_fields = NULL;
_has_injected_fields = -1;
_is_anonymous = false;
_is_unsafe_anonymous = false;
_loader = loader;
_protection_domain = protection_domain;
_is_shared = false;
@ -615,12 +615,12 @@ ciInstanceKlass* ciInstanceKlass::implementor() {
return impl;
}
ciInstanceKlass* ciInstanceKlass::host_klass() {
ciInstanceKlass* ciInstanceKlass::unsafe_anonymous_host() {
assert(is_loaded(), "must be loaded");
if (is_anonymous()) {
if (is_unsafe_anonymous()) {
VM_ENTRY_MARK
Klass* host_klass = get_instanceKlass()->host_klass();
return CURRENT_ENV->get_instance_klass(host_klass);
Klass* unsafe_anonymous_host = get_instanceKlass()->unsafe_anonymous_host();
return CURRENT_ENV->get_instance_klass(unsafe_anonymous_host);
}
return NULL;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,7 @@ private:
bool _has_subklass;
bool _has_nonstatic_fields;
bool _has_nonstatic_concrete_methods;
bool _is_anonymous;
bool _is_unsafe_anonymous;
ciFlags _flags;
jint _nonstatic_field_size;
@ -179,8 +179,8 @@ public:
return _has_nonstatic_concrete_methods;
}
bool is_anonymous() {
return _is_anonymous;
bool is_unsafe_anonymous() {
return _is_unsafe_anonymous;
}
ciInstanceKlass* get_canonical_holder(int offset);
@ -260,7 +260,7 @@ public:
return NULL;
}
ciInstanceKlass* host_klass();
ciInstanceKlass* unsafe_anonymous_host();
bool can_be_instantiated() {
assert(is_loaded(), "must be loaded");

View File

@ -35,6 +35,7 @@
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/macros.hpp"

View File

@ -2091,7 +2091,7 @@ AnnotationCollector::annotation_index(const ClassLoaderData* loader_data,
// Privileged code can use all annotations. Other code silently drops some.
const bool privileged = loader_data->is_the_null_class_loader_data() ||
loader_data->is_platform_class_loader_data() ||
loader_data->is_anonymous();
loader_data->is_unsafe_anonymous();
switch (sid) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(reflect_CallerSensitive_signature): {
if (_location != _in_method) break; // only allow for methods
@ -5591,7 +5591,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
ik->set_this_class_index(_this_class_index);
if (is_anonymous()) {
if (is_unsafe_anonymous()) {
// _this_class_index is a CONSTANT_Class entry that refers to this
// anonymous class itself. If this class needs to refer to its own methods or
// fields, it would use a CONSTANT_MethodRef, etc, which would reference
@ -5607,9 +5607,9 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods);
ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods);
if (_host_klass != NULL) {
assert (ik->is_anonymous(), "should be the same");
ik->set_host_klass(_host_klass);
if (_unsafe_anonymous_host != NULL) {
assert (ik->is_unsafe_anonymous(), "should be the same");
ik->set_unsafe_anonymous_host(_unsafe_anonymous_host);
}
// Set PackageEntry for this_klass
@ -5760,15 +5760,15 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
debug_only(ik->verify();)
}
// For an anonymous class that is in the unnamed package, move it to its host class's
// For an unsafe anonymous class that is in the unnamed package, move it to its host class's
// package by prepending its host class's package name to its class name and setting
// its _class_name field.
void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) {
void ClassFileParser::prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS) {
ResourceMark rm(THREAD);
assert(strrchr(_class_name->as_C_string(), '/') == NULL,
"Anonymous class should not be in a package");
"Unsafe anonymous class should not be in a package");
const char* host_pkg_name =
ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL);
ClassLoader::package_from_name(unsafe_anonymous_host->name()->as_C_string(), NULL);
if (host_pkg_name != NULL) {
size_t host_pkg_len = strlen(host_pkg_name);
@ -5778,7 +5778,7 @@ void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass,
// Copy host package name and trailing /.
strncpy(new_anon_name, host_pkg_name, host_pkg_len);
new_anon_name[host_pkg_len] = '/';
// Append anonymous class name. The anonymous class name can contain odd
// Append unsafe anonymous class name. The unsafe anonymous class name can contain odd
// characters. So, do a strncpy instead of using sprintf("%s...").
strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);
@ -5793,19 +5793,19 @@ void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass,
// nothing. If the anonymous class is in the unnamed package then move it to its
// host's package. If the classes are in different packages then throw an IAE
// exception.
void ClassFileParser::fix_anonymous_class_name(TRAPS) {
assert(_host_klass != NULL, "Expected an anonymous class");
void ClassFileParser::fix_unsafe_anonymous_class_name(TRAPS) {
assert(_unsafe_anonymous_host != NULL, "Expected an unsafe anonymous class");
const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
_class_name->utf8_length(), '/');
if (anon_last_slash == NULL) { // Unnamed package
prepend_host_package_name(_host_klass, CHECK);
prepend_host_package_name(_unsafe_anonymous_host, CHECK);
} else {
if (!_host_klass->is_same_class_package(_host_klass->class_loader(), _class_name)) {
if (!_unsafe_anonymous_host->is_same_class_package(_unsafe_anonymous_host->class_loader(), _class_name)) {
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
err_msg("Host class %s and anonymous class %s are in different packages",
_host_klass->name()->as_C_string(), _class_name->as_C_string()));
_unsafe_anonymous_host->name()->as_C_string(), _class_name->as_C_string()));
}
}
}
@ -5825,14 +5825,14 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const InstanceKlass* host_klass,
const InstanceKlass* unsafe_anonymous_host,
GrowableArray<Handle>* cp_patches,
Publicity pub_level,
TRAPS) :
_stream(stream),
_requested_name(name),
_loader_data(loader_data),
_host_klass(host_klass),
_unsafe_anonymous_host(unsafe_anonymous_host),
_cp_patches(cp_patches),
_num_patched_klasses(0),
_max_num_patched_klasses(0),
@ -6140,8 +6140,8 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
// if this is an anonymous class fix up its name if it's in the unnamed
// package. Otherwise, throw IAE if it is in a different package than
// its host class.
if (_host_klass != NULL) {
fix_anonymous_class_name(CHECK);
if (_unsafe_anonymous_host != NULL) {
fix_unsafe_anonymous_class_name(CHECK);
}
// Verification prevents us from creating names with dots in them, this
@ -6166,9 +6166,9 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
warning("DumpLoadedClassList and CDS are not supported in exploded build");
DumpLoadedClassList = NULL;
} else if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
_host_klass == NULL) {
_unsafe_anonymous_host == NULL) {
// Only dump the classes that can be stored into CDS archive.
// Anonymous classes such as generated LambdaForm classes are also not included.
// Unsafe anonymous classes such as generated LambdaForm classes are also not included.
oop class_loader = _loader_data->class_loader();
ResourceMark rm(THREAD);
bool skip = false;

View File

@ -82,7 +82,7 @@ class ClassFileParser {
const Symbol* _requested_name;
Symbol* _class_name;
mutable ClassLoaderData* _loader_data;
const InstanceKlass* _host_klass;
const InstanceKlass* _unsafe_anonymous_host;
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
int _num_patched_klasses;
int _max_num_patched_klasses;
@ -173,8 +173,8 @@ class ClassFileParser {
ConstantPool* cp,
TRAPS);
void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS);
void fix_anonymous_class_name(TRAPS);
void prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS);
void fix_unsafe_anonymous_class_name(TRAPS);
void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS);
void set_klass(InstanceKlass* instance);
@ -501,7 +501,7 @@ class ClassFileParser {
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const InstanceKlass* host_klass,
const InstanceKlass* unsafe_anonymous_host,
GrowableArray<Handle>* cp_patches,
Publicity pub_level,
TRAPS);
@ -524,10 +524,10 @@ class ClassFileParser {
u2 this_class_index() const { return _this_class_index; }
u2 super_class_index() const { return _super_class_index; }
bool is_anonymous() const { return _host_klass != NULL; }
bool is_unsafe_anonymous() const { return _unsafe_anonymous_host != NULL; }
bool is_interface() const { return _access_flags.is_interface(); }
const InstanceKlass* host_klass() const { return _host_klass; }
const InstanceKlass* unsafe_anonymous_host() const { return _unsafe_anonymous_host; }
const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
ClassLoaderData* loader_data() const { return _loader_data; }
const Symbol* class_name() const { return _class_name; }

View File

@ -1400,7 +1400,7 @@ InstanceKlass* ClassLoader::load_class(Symbol* name, bool search_append_only, TR
name,
loader_data,
protection_domain,
NULL, // host_klass
NULL, // unsafe_anonymous_host
NULL, // cp_patches
THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -1443,8 +1443,8 @@ void ClassLoader::record_result(InstanceKlass* ik, const ClassFileStream* stream
assert(DumpSharedSpaces, "sanity");
assert(stream != NULL, "sanity");
if (ik->is_anonymous()) {
// We do not archive anonymous classes.
if (ik->is_unsafe_anonymous()) {
// We do not archive unsafe anonymous classes.
return;
}

View File

@ -141,16 +141,16 @@ void ClassLoaderData::initialize_name(Handle class_loader) {
_name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id, CATCH);
}
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) :
_metaspace(NULL),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)),
_unloading(false), _is_anonymous(is_anonymous),
_unloading(false), _is_unsafe_anonymous(is_unsafe_anonymous),
_modified_oops(true), _accumulated_modified_oops(false),
// An anonymous class loader data doesn't have anything to keep
// it from being unloaded during parsing of the anonymous class.
// An unsafe anonymous class loader data doesn't have anything to keep
// it from being unloaded during parsing of the unsafe anonymous class.
// The null-class-loader should always be kept alive.
_keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
_keep_alive((is_unsafe_anonymous || h_class_loader.is_null()) ? 1 : 0),
_claimed(0),
_handles(),
_klasses(NULL), _packages(NULL), _modules(NULL), _unnamed_module(NULL), _dictionary(NULL),
@ -164,14 +164,14 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
_class_loader_klass = h_class_loader->klass();
}
if (!is_anonymous) {
// The holder is initialized later for anonymous classes, and before calling anything
if (!is_unsafe_anonymous) {
// The holder is initialized later for unsafe anonymous classes, and before calling anything
// that call class_loader().
initialize_holder(h_class_loader);
// A ClassLoaderData created solely for an anonymous class should never have a
// A ClassLoaderData created solely for an unsafe anonymous class should never have a
// ModuleEntryTable or PackageEntryTable created for it. The defining package
// and module for an anonymous class will be found in its host class.
// and module for an unsafe anonymous class will be found in its host class.
_packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
if (h_class_loader.is_null()) {
// Create unnamed module for boot loader
@ -287,20 +287,20 @@ bool ClassLoaderData::claim() {
return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0;
}
// Anonymous classes have their own ClassLoaderData that is marked to keep alive
// Unsafe anonymous classes have their own ClassLoaderData that is marked to keep alive
// while the class is being parsed, and if the class appears on the module fixup list.
// Due to the uniqueness that no other class shares the anonymous class' name or
// ClassLoaderData, no other non-GC thread has knowledge of the anonymous class while
// Due to the uniqueness that no other class shares the unsafe anonymous class' name or
// ClassLoaderData, no other non-GC thread has knowledge of the unsafe anonymous class while
// it is being defined, therefore _keep_alive is not volatile or atomic.
void ClassLoaderData::inc_keep_alive() {
if (is_anonymous()) {
if (is_unsafe_anonymous()) {
assert(_keep_alive >= 0, "Invalid keep alive increment count");
_keep_alive++;
}
}
void ClassLoaderData::dec_keep_alive() {
if (is_anonymous()) {
if (is_unsafe_anonymous()) {
assert(_keep_alive > 0, "Invalid keep alive decrement count");
_keep_alive--;
}
@ -402,20 +402,20 @@ void ClassLoaderData::record_dependency(const Klass* k) {
// Do not need to record dependency if the dependency is to a class whose
// class loader data is never freed. (i.e. the dependency's class loader
// is one of the three builtin class loaders and the dependency is not
// anonymous.)
// unsafe anonymous.)
if (to_cld->is_permanent_class_loader_data()) {
return;
}
oop to;
if (to_cld->is_anonymous()) {
// Just return if an anonymous class is attempting to record a dependency
// to itself. (Note that every anonymous class has its own unique class
if (to_cld->is_unsafe_anonymous()) {
// Just return if an unsafe anonymous class is attempting to record a dependency
// to itself. (Note that every unsafe anonymous class has its own unique class
// loader data.)
if (to_cld == from_cld) {
return;
}
// Anonymous class dependencies are through the mirror.
// Unsafe anonymous class dependencies are through the mirror.
to = k->java_mirror();
} else {
to = to_cld->class_loader();
@ -640,7 +640,7 @@ const int _boot_loader_dictionary_size = 1009;
const int _default_loader_dictionary_size = 107;
Dictionary* ClassLoaderData::create_dictionary() {
assert(!is_anonymous(), "anonymous class loader data do not have a dictionary");
assert(!is_unsafe_anonymous(), "unsafe anonymous class loader data do not have a dictionary");
int size;
bool resizable = false;
if (_the_null_class_loader_data == NULL) {
@ -655,7 +655,7 @@ Dictionary* ClassLoaderData::create_dictionary() {
size = _default_loader_dictionary_size;
resizable = true;
}
if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces || UseSharedSpaces) {
if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces) {
resizable = false;
}
return new Dictionary(this, size, resizable);
@ -677,7 +677,7 @@ oop ClassLoaderData::holder_phantom() const {
// Unloading support
bool ClassLoaderData::is_alive() const {
bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
bool alive = keep_alive() // null class loader and incomplete unsafe anonymous klasses.
|| (_holder.peek() != NULL); // and not cleaned by the GC weak handle processing.
return alive;
@ -767,13 +767,13 @@ ClassLoaderData::~ClassLoaderData() {
// Returns true if this class loader data is for the app class loader
// or a user defined system class loader. (Note that the class loader
// data may be anonymous.)
// data may be unsafe anonymous.)
bool ClassLoaderData::is_system_class_loader_data() const {
return SystemDictionary::is_system_class_loader(class_loader());
}
// Returns true if this class loader data is for the platform class loader.
// (Note that the class loader data may be anonymous.)
// (Note that the class loader data may be unsafe anonymous.)
bool ClassLoaderData::is_platform_class_loader_data() const {
return SystemDictionary::is_platform_class_loader(class_loader());
}
@ -781,7 +781,7 @@ bool ClassLoaderData::is_platform_class_loader_data() const {
// Returns true if the class loader for this class loader data is one of
// the 3 builtin (boot application/system or platform) class loaders,
// including a user-defined system class loader. Note that if the class
// loader data is for an anonymous class then it may get freed by a GC
// loader data is for an unsafe anonymous class then it may get freed by a GC
// even if its class loader is one of these loaders.
bool ClassLoaderData::is_builtin_class_loader_data() const {
return (is_boot_class_loader_data() ||
@ -790,10 +790,10 @@ bool ClassLoaderData::is_builtin_class_loader_data() const {
}
// Returns true if this class loader data is a class loader data
// that is not ever freed by a GC. It must be one of the builtin
// class loaders and not anonymous.
// that is not ever freed by a GC. It must be the CLD for one of the builtin
// class loaders and not the CLD for an unsafe anonymous class.
bool ClassLoaderData::is_permanent_class_loader_data() const {
return is_builtin_class_loader_data() && !is_anonymous();
return is_builtin_class_loader_data() && !is_unsafe_anonymous();
}
ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
@ -810,8 +810,8 @@ ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
if (this == the_null_class_loader_data()) {
assert (class_loader() == NULL, "Must be");
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
} else if (is_anonymous()) {
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
} else if (is_unsafe_anonymous()) {
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType);
} else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
} else {
@ -962,8 +962,8 @@ void ClassLoaderData::free_deallocate_list_C_heap_structures() {
}
}
// These anonymous class loaders are to contain classes used for JSR292
ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(Handle loader) {
// These CLDs are to contain unsafe anonymous classes used for JSR292
ClassLoaderData* ClassLoaderData::unsafe_anonymous_class_loader_data(Handle loader) {
// Add a new class loader data to the graph.
return ClassLoaderDataGraph::add(loader, true);
}
@ -1005,8 +1005,8 @@ void ClassLoaderData::print_value_on(outputStream* out) const {
// loader data: 0xsomeaddr of 'bootstrap'
out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
}
if (is_anonymous()) {
out->print(" anonymous");
if (is_unsafe_anonymous()) {
out->print(" unsafe anonymous");
}
}
@ -1014,7 +1014,7 @@ void ClassLoaderData::print_value_on(outputStream* out) const {
void ClassLoaderData::print_on(outputStream* out) const {
out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: %s {",
p2i(this), p2i(_class_loader.ptr_raw()), loader_name_and_id());
if (is_anonymous()) out->print(" anonymous");
if (is_unsafe_anonymous()) out->print(" unsafe anonymous");
if (claimed()) out->print(" claimed");
if (is_unloading()) out->print(" unloading");
out->print(" metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null()));
@ -1032,8 +1032,8 @@ void ClassLoaderData::verify() {
assert_locked_or_safepoint(_metaspace_lock);
oop cl = class_loader();
guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_anonymous(), "must be");
guarantee(this == class_loader_data(cl) || is_unsafe_anonymous(), "Must be the same");
guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be");
// Verify the integrity of the allocated space.
if (metaspace_or_null() != NULL) {
@ -1069,14 +1069,14 @@ bool ClassLoaderDataGraph::_metaspace_oom = false;
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anonymous) {
ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) {
NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
// ClassLoaderData in the graph since the CLD
// contains oops in _handles that must be walked.
ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
ClassLoaderData* cld = new ClassLoaderData(loader, is_unsafe_anonymous);
if (!is_anonymous) {
if (!is_unsafe_anonymous) {
// First, Atomically set it
ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL);
if (old != NULL) {
@ -1109,8 +1109,8 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anony
} while (true);
}
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
ClassLoaderData* loader_data = add_to_graph(loader, is_anonymous);
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) {
ClassLoaderData* loader_data = add_to_graph(loader, is_unsafe_anonymous);
// Initialize _name and _name_and_id after the loader data is added to the
// CLDG because adding the Symbol for _name and _name_and_id might safepoint.
if (loader.not_null()) {
@ -1119,28 +1119,6 @@ ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
return loader_data;
}
void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->oops_do(f, must_claim);
}
}
void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
if (cld->keep_alive()) {
cld->oops_do(f, must_claim);
}
}
}
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) {
if (ClassUnloading) {
keep_alive_oops_do(f, must_claim);
} else {
oops_do(f, must_claim);
}
}
void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
cl->do_cld(cld);
@ -1166,13 +1144,9 @@ void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
}
}
void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
roots_cld_do(cl, NULL);
}
void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
if (ClassUnloading) {
keep_alive_cld_do(cl);
roots_cld_do(cl, NULL);
} else {
cld_do(cl);
}
@ -1280,15 +1254,6 @@ void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*, TRAPS),
}
}
// Walks all entries in the dictionary including entries initiated by this class loader.
void ClassLoaderDataGraph::dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) {
Thread* thread = Thread::current();
FOR_ALL_DICTIONARY(cld) {
Handle holder(thread, cld->holder_phantom());
cld->dictionary()->all_entries_do(f);
}
}
void ClassLoaderDataGraph::verify_dictionary() {
FOR_ALL_DICTIONARY(cld) {
cld->dictionary()->verify();

View File

@ -92,29 +92,24 @@ class ClassLoaderDataGraph : public AllStatic {
static volatile size_t _num_instance_classes;
static volatile size_t _num_array_classes;
static ClassLoaderData* add_to_graph(Handle class_loader, bool anonymous);
static ClassLoaderData* add(Handle class_loader, bool anonymous);
static ClassLoaderData* add_to_graph(Handle class_loader, bool is_unsafe_anonymous);
static ClassLoaderData* add(Handle class_loader, bool is_unsafe_anonymous);
public:
static ClassLoaderData* find_or_create(Handle class_loader);
static void clean_module_and_package_info();
static void purge();
static void clear_claimed_marks();
// oops do
static void oops_do(OopClosure* f, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, bool must_claim);
// cld do
// Iteration through CLDG inside a safepoint; GC support
static void cld_do(CLDClosure* cl);
static void cld_unloading_do(CLDClosure* cl);
static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
static void keep_alive_cld_do(CLDClosure* cl);
static void always_strong_cld_do(CLDClosure* cl);
// klass do
// Walking classes through the ClassLoaderDataGraph include array classes. It also includes
// classes that are allocated but not loaded, classes that have errors, and scratch classes
// for redefinition. These classes are removed during the next class unloading.
// Walking the ClassLoaderDataGraph also includes anonymous classes.
// Walking the ClassLoaderDataGraph also includes unsafe anonymous classes.
static void classes_do(KlassClosure* klass_closure);
static void classes_do(void f(Klass* const));
static void methods_do(void f(Method*));
@ -139,9 +134,6 @@ class ClassLoaderDataGraph : public AllStatic {
// Added for initialize_itable_for_klass to handle exceptions.
static void dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
// Iterate all classes and their class loaders, including initiating class loaders.
static void dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
// VM_CounterDecay iteration support
static InstanceKlass* try_get_next_class();
@ -238,16 +230,17 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// classes in the class loader are allocated.
Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup.
bool _unloading; // true if this class loader goes away
bool _is_anonymous; // if this CLD is for an anonymous class
bool _is_unsafe_anonymous; // CLD is dedicated to one class and that class determines the CLDs lifecycle.
// For example, an unsafe anonymous class.
// Remembered sets support for the oops in the class loader data.
bool _modified_oops; // Card Table Equivalent (YC/CMS support)
bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
s2 _keep_alive; // if this CLD is kept alive.
// Used for anonymous classes and the boot class
// Used for unsafe anonymous classes and the boot class
// loader. _keep_alive does not need to be volatile or
// atomic since there is one unique CLD per anonymous class.
// atomic since there is one unique CLD per unsafe anonymous class.
volatile int _claimed; // true if claimed, for example during GC traces.
// To avoid applying oop closure more than once.
@ -283,7 +276,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
void set_next(ClassLoaderData* next) { _next = next; }
ClassLoaderData* next() const { return _next; }
ClassLoaderData(Handle h_class_loader, bool is_anonymous);
ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous);
~ClassLoaderData();
// The CLD are not placed in the Heap, so the Card Table or
@ -337,7 +330,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
Mutex* metaspace_lock() const { return _metaspace_lock; }
bool is_anonymous() const { return _is_anonymous; }
bool is_unsafe_anonymous() const { return _is_unsafe_anonymous; }
static void init_null_class_loader_data();
@ -346,15 +339,15 @@ class ClassLoaderData : public CHeapObj<mtClass> {
}
// Returns true if this class loader data is for the system class loader.
// (Note that the class loader data may be anonymous.)
// (Note that the class loader data may be unsafe anonymous.)
bool is_system_class_loader_data() const;
// Returns true if this class loader data is for the platform class loader.
// (Note that the class loader data may be anonymous.)
// (Note that the class loader data may be unsafe anonymous.)
bool is_platform_class_loader_data() const;
// Returns true if this class loader data is for the boot class loader.
// (Note that the class loader data may be anonymous.)
// (Note that the class loader data may be unsafe anonymous.)
inline bool is_boot_class_loader_data() const;
bool is_builtin_class_loader_data() const;
@ -372,7 +365,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
return _unloading;
}
// Used to refcount an anonymous class's CLD in order to
// Used to refcount an unsafe anonymous class's CLD in order to
// indicate their aliveness.
void inc_keep_alive();
void dec_keep_alive();
@ -412,7 +405,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
static ClassLoaderData* class_loader_data(oop loader);
static ClassLoaderData* class_loader_data_or_null(oop loader);
static ClassLoaderData* anonymous_class_loader_data(Handle loader);
static ClassLoaderData* unsafe_anonymous_class_loader_data(Handle loader);
// Returns Klass* of associated class loader, or NULL if associated loader is 'bootstrap'.
// Also works if unloading.

View File

@ -94,9 +94,12 @@ void ClassLoaderDataGraph::dec_array_classes(size_t count) {
}
bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
bool do_cleaning = _safepoint_cleanup_needed && _should_clean_deallocate_lists;
// Only clean metaspaces after full GC.
bool do_cleaning = _safepoint_cleanup_needed;
#if INCLUDE_JVMTI
do_cleaning = do_cleaning || InstanceKlass::has_previous_versions();
do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::has_previous_versions());
#else
do_cleaning = do_cleaning && _should_clean_deallocate_lists;
#endif
_safepoint_cleanup_needed = false; // reset
return do_cleaning;

View File

@ -50,6 +50,7 @@
jshort ClassLoaderExt::_app_class_paths_start_index = ClassLoaderExt::max_classpath_index;
jshort ClassLoaderExt::_app_module_paths_start_index = ClassLoaderExt::max_classpath_index;
jshort ClassLoaderExt::_max_used_path_index = 0;
bool ClassLoaderExt::_has_app_classes = false;
bool ClassLoaderExt::_has_platform_classes = false;
@ -242,6 +243,9 @@ void ClassLoaderExt::record_result(const s2 classpath_index,
classloader_type = ClassLoader::PLATFORM_LOADER;
ClassLoaderExt::set_has_platform_classes();
}
if (classpath_index > ClassLoaderExt::max_used_path_index()) {
ClassLoaderExt::set_max_used_path_index(classpath_index);
}
result->set_shared_classpath_index(classpath_index);
result->set_class_loader_type(classloader_type);
}
@ -294,7 +298,7 @@ InstanceKlass* ClassLoaderExt::load_class(Symbol* name, const char* path, TRAPS)
name,
loader_data,
protection_domain,
NULL, // host_klass
NULL, // unsafe_anonymous_host
NULL, // cp_patches
THREAD);

View File

@ -49,6 +49,8 @@ private:
static jshort _app_class_paths_start_index;
// index of first modular JAR in shared modulepath entry table
static jshort _app_module_paths_start_index;
// the largest path index being used during CDS dump time
static jshort _max_used_path_index;
static bool _has_app_classes;
static bool _has_platform_classes;
@ -91,6 +93,12 @@ public:
static jshort app_module_paths_start_index() { return _app_module_paths_start_index; }
static jshort max_used_path_index() { return _max_used_path_index; }
static void set_max_used_path_index(jshort used_index) {
_max_used_path_index = used_index;
}
static void init_paths_start_index(jshort app_start) {
_app_class_paths_start_index = app_start;
}

View File

@ -128,7 +128,7 @@ public:
class LoaderTreeNode : public ResourceObj {
// We walk the CLDG and, for each CLD which is non-anonymous, add
// We walk the CLDG and, for each CLD which is non-unsafe_anonymous, add
// a tree node.
// To add a node we need its parent node; if the parent node does not yet
// exist - because we have not yet encountered the CLD for the parent loader -
@ -219,7 +219,7 @@ class LoaderTreeNode : public ResourceObj {
if (print_classes) {
if (_classes != NULL) {
for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) {
// Non-anonymous classes should live in the primary CLD of its loader
// Non-unsafe anonymous classes should live in the primary CLD of its loader
assert(lci->_cld == _cld, "must be");
branchtracker.print(st);
@ -252,12 +252,12 @@ class LoaderTreeNode : public ResourceObj {
for (LoadedClassInfo* lci = _anon_classes; lci; lci = lci->_next) {
branchtracker.print(st);
if (lci == _anon_classes) { // first iteration
st->print("%*s ", indentation, "Anonymous Classes:");
st->print("%*s ", indentation, "Unsafe Anonymous Classes:");
} else {
st->print("%*s ", indentation, "");
}
st->print("%s", lci->_klass->external_name());
// For anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
// For unsafe anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
assert(lci->_cld != _cld, "must be");
if (verbose) {
st->print(" (Loader Data: " PTR_FORMAT ")", p2i(lci->_cld));
@ -266,7 +266,7 @@ class LoaderTreeNode : public ResourceObj {
}
branchtracker.print(st);
st->print("%*s ", indentation, "");
st->print_cr("(%u anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
st->print_cr("(%u unsafe anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
// Empty line
branchtracker.print(st);
@ -318,14 +318,14 @@ public:
_next = info;
}
void add_classes(LoadedClassInfo* first_class, int num_classes, bool anonymous) {
LoadedClassInfo** p_list_to_add_to = anonymous ? &_anon_classes : &_classes;
void add_classes(LoadedClassInfo* first_class, int num_classes, bool is_unsafe_anonymous) {
LoadedClassInfo** p_list_to_add_to = is_unsafe_anonymous ? &_anon_classes : &_classes;
// Search tail.
while ((*p_list_to_add_to) != NULL) {
p_list_to_add_to = &(*p_list_to_add_to)->_next;
}
*p_list_to_add_to = first_class;
if (anonymous) {
if (is_unsafe_anonymous) {
_num_anon_classes += num_classes;
} else {
_num_classes += num_classes;
@ -420,7 +420,7 @@ class LoaderInfoScanClosure : public CLDClosure {
LoadedClassCollectClosure lccc(cld);
const_cast<ClassLoaderData*>(cld)->classes_do(&lccc);
if (lccc._num_classes > 0) {
info->add_classes(lccc._list, lccc._num_classes, cld->is_anonymous());
info->add_classes(lccc._list, lccc._num_classes, cld->is_unsafe_anonymous());
}
}
@ -480,7 +480,7 @@ public:
assert(info != NULL, "must be");
// Update CLD in node, but only if this is the primary CLD for this loader.
if (cld->is_anonymous() == false) {
if (cld->is_unsafe_anonymous() == false) {
assert(info->cld() == NULL, "there should be only one primary CLD per loader");
info->set_cld(cld);
}

View File

@ -58,7 +58,7 @@ void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) {
cls = *cls_ptr;
}
if (!cld->is_anonymous()) {
if (!cld->is_unsafe_anonymous()) {
cls->_cld = cld;
}
@ -70,7 +70,7 @@ void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) {
ClassStatsClosure csc;
cld->classes_do(&csc);
if(cld->is_anonymous()) {
if(cld->is_unsafe_anonymous()) {
cls->_anon_classes_count += csc._num_classes;
} else {
cls->_classes_count = csc._num_classes;
@ -79,7 +79,7 @@ void ClassLoaderStatsClosure::do_cld(ClassLoaderData* cld) {
ClassLoaderMetaspace* ms = cld->metaspace_or_null();
if (ms != NULL) {
if(cld->is_anonymous()) {
if(cld->is_unsafe_anonymous()) {
cls->_anon_chunk_sz += ms->allocated_chunks_bytes();
cls->_anon_block_sz += ms->allocated_blocks_bytes();
} else {

View File

@ -27,6 +27,7 @@
#include "classfile/compactHashtable.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "logging/logMessage.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "oops/compressedOops.inline.hpp"
@ -280,8 +281,9 @@ class CompactHashtable_OopIterator {
public:
CompactHashtable_OopIterator(OopClosure *cl) : _closure(cl) {}
inline void do_value(address base_address, u4 offset) const {
narrowOop o = (narrowOop)offset;
_closure->do_oop(&o);
narrowOop v = (narrowOop)offset;
oop obj = HeapShared::decode_with_archived_oop_encoding_mode(v);
_closure->do_oop(&obj);
}
};

View File

@ -231,6 +231,10 @@ public:
// For reading from/writing to the CDS archive
void serialize(SerializeClosure* soc);
inline bool empty() {
return (_entry_count == 0);
}
};
template <class T, class N> class CompactHashtable : public SimpleCompactHashtable {

View File

@ -28,7 +28,8 @@
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "oops/oop.hpp"
template <class T, class N>
@ -46,8 +47,8 @@ inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, ch
template <class T, class N>
inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
u4 offset, const char* name, int len) {
narrowOop obj = (narrowOop)offset;
oop string = CompressedOops::decode(obj);
narrowOop v = (narrowOop)offset;
oop string = HeapShared::decode_with_archived_oop_encoding_mode(v);
if (java_lang_String::equals(string, (jchar*)name, len)) {
return string;
}

View File

@ -885,7 +885,7 @@ static void switchover_constant_pool(BytecodeConstantPool* bpool,
ConstantPool* cp = bpool->create_constant_pool(CHECK);
if (cp != klass->constants()) {
// Copy resolved anonymous class into new constant pool.
if (klass->is_anonymous()) {
if (klass->is_unsafe_anonymous()) {
cp->klass_at_put(klass->this_class_index(), klass);
}
klass->class_loader_data()->add_to_deallocate_list(klass->constants());

View File

@ -330,13 +330,13 @@ void Dictionary::classes_do(void f(InstanceKlass*, TRAPS), TRAPS) {
}
// All classes, and their class loaders, including initiating class loaders
void Dictionary::all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) {
void Dictionary::all_entries_do(KlassClosure* closure) {
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
probe != NULL;
probe = probe->next()) {
InstanceKlass* k = probe->instance_klass();
f(k, loader_data());
closure->do_klass(k);
}
}
}
@ -592,8 +592,8 @@ void Dictionary::print_on(outputStream* st) const {
ResourceMark rm;
assert(loader_data() != NULL, "loader data should not be null");
st->print_cr("Java dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
st->print_cr("Java dictionary (table_size=%d, classes=%d, resizable=%s)",
table_size(), number_of_entries(), BOOL_TO_STR(_resizable));
st->print_cr("^ indicates that initiating loader is different from defining loader");
for (int index = 0; index < table_size(); index++) {

View File

@ -74,7 +74,7 @@ public:
void classes_do(void f(InstanceKlass*));
void classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
void all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
void all_entries_do(KlassClosure* closure);
void classes_do(MetaspaceClosure* it);
void unlink();

View File

@ -50,7 +50,7 @@
#include "oops/symbol.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -209,7 +209,7 @@ void java_lang_String::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_String::serialize(SerializeClosure* f) {
void java_lang_String::serialize_offsets(SerializeClosure* f) {
STRING_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
f->do_u4((u4*)&initialized);
}
@ -1038,6 +1038,7 @@ void java_lang_Class::archive_basic_type_mirrors(TRAPS) {
if (m != NULL) {
// Update the field at _array_klass_offset to point to the relocated array klass.
oop archived_m = MetaspaceShared::archive_heap_object(m, THREAD);
assert(archived_m != NULL, "sanity");
Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
assert(ak != NULL || t == T_VOID, "should not be NULL");
if (ak != NULL) {
@ -1212,7 +1213,7 @@ oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror,
bool java_lang_Class::restore_archived_mirror(Klass *k,
Handle class_loader, Handle module,
Handle protection_domain, TRAPS) {
oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw());
oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw_narrow());
if (m == NULL) {
return false;
@ -1270,6 +1271,13 @@ int java_lang_Class::oop_size(oop java_class) {
return size;
}
int java_lang_Class::oop_size_raw(oop java_class) {
assert(_oop_size_offset != 0, "must be set");
int size = java_class->int_field_raw(_oop_size_offset);
assert(size > 0, "Oop size must be greater than zero, not %d", size);
return size;
}
void java_lang_Class::set_oop_size(HeapWord* java_class, int size) {
assert(_oop_size_offset != 0, "must be set");
assert(size > 0, "Oop size must be greater than zero, not %d", size);
@ -1280,6 +1288,12 @@ int java_lang_Class::static_oop_field_count(oop java_class) {
assert(_static_oop_field_count_offset != 0, "must be set");
return java_class->int_field(_static_oop_field_count_offset);
}
int java_lang_Class::static_oop_field_count_raw(oop java_class) {
assert(_static_oop_field_count_offset != 0, "must be set");
return java_class->int_field_raw(_static_oop_field_count_offset);
}
void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
assert(_static_oop_field_count_offset != 0, "must be set");
java_class->int_field_put(_static_oop_field_count_offset, size);
@ -1369,6 +1383,14 @@ Klass* java_lang_Class::as_Klass(oop java_class) {
return k;
}
Klass* java_lang_Class::as_Klass_raw(oop java_class) {
//%note memory_2
assert(java_lang_Class::is_instance(java_class), "must be a Class object");
Klass* k = ((Klass*)java_class->metadata_field_raw(_klass_offset));
assert(k == NULL || k->is_klass(), "type check");
return k;
}
void java_lang_Class::set_klass(oop java_class, Klass* klass) {
assert(java_lang_Class::is_instance(java_class), "must be a Class object");
@ -1534,7 +1556,7 @@ void java_lang_Class::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_Class::serialize(SerializeClosure* f) {
void java_lang_Class::serialize_offsets(SerializeClosure* f) {
f->do_u4((u4*)&offsets_computed);
f->do_u4((u4*)&_init_lock_offset);
@ -1608,7 +1630,7 @@ void java_lang_Thread::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_Thread::serialize(SerializeClosure* f) {
void java_lang_Thread::serialize_offsets(SerializeClosure* f) {
THREAD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -1860,7 +1882,7 @@ void java_lang_ThreadGroup::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_ThreadGroup::serialize(SerializeClosure* f) {
void java_lang_ThreadGroup::serialize_offsets(SerializeClosure* f) {
THREADGROUP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -1878,7 +1900,7 @@ void java_lang_Throwable::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_Throwable::serialize(SerializeClosure* f) {
void java_lang_Throwable::serialize_offsets(SerializeClosure* f) {
THROWABLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -2654,7 +2676,7 @@ void java_lang_StackFrameInfo::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_StackFrameInfo::serialize(SerializeClosure* f) {
void java_lang_StackFrameInfo::serialize_offsets(SerializeClosure* f) {
STACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
}
@ -2672,7 +2694,7 @@ void java_lang_LiveStackFrameInfo::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_LiveStackFrameInfo::serialize(SerializeClosure* f) {
void java_lang_LiveStackFrameInfo::serialize_offsets(SerializeClosure* f) {
LIVESTACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -2686,7 +2708,7 @@ void java_lang_reflect_AccessibleObject::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_reflect_AccessibleObject::serialize(SerializeClosure* f) {
void java_lang_reflect_AccessibleObject::serialize_offsets(SerializeClosure* f) {
ACCESSIBLEOBJECT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -2727,7 +2749,7 @@ void java_lang_reflect_Method::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_reflect_Method::serialize(SerializeClosure* f) {
void java_lang_reflect_Method::serialize_offsets(SerializeClosure* f) {
METHOD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -2914,7 +2936,7 @@ void java_lang_reflect_Constructor::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_reflect_Constructor::serialize(SerializeClosure* f) {
void java_lang_reflect_Constructor::serialize_offsets(SerializeClosure* f) {
CONSTRUCTOR_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3063,7 +3085,7 @@ void java_lang_reflect_Field::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_reflect_Field::serialize(SerializeClosure* f) {
void java_lang_reflect_Field::serialize_offsets(SerializeClosure* f) {
FIELD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3186,7 +3208,7 @@ void reflect_ConstantPool::compute_offsets() {
}
#if INCLUDE_CDS
void reflect_ConstantPool::serialize(SerializeClosure* f) {
void reflect_ConstantPool::serialize_offsets(SerializeClosure* f) {
CONSTANTPOOL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3203,7 +3225,7 @@ void java_lang_reflect_Parameter::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_reflect_Parameter::serialize(SerializeClosure* f) {
void java_lang_reflect_Parameter::serialize_offsets(SerializeClosure* f) {
PARAMETER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3281,7 +3303,7 @@ void java_lang_Module::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_Module::serialize(SerializeClosure* f) {
void java_lang_Module::serialize_offsets(SerializeClosure* f) {
MODULE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
MODULE_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
}
@ -3371,7 +3393,7 @@ void reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
}
#if INCLUDE_CDS
void reflect_UnsafeStaticFieldAccessorImpl::serialize(SerializeClosure* f) {
void reflect_UnsafeStaticFieldAccessorImpl::serialize_offsets(SerializeClosure* f) {
UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3543,7 +3565,7 @@ void java_lang_ref_SoftReference::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_ref_SoftReference::serialize(SerializeClosure* f) {
void java_lang_ref_SoftReference::serialize_offsets(SerializeClosure* f) {
SOFTREFERENCE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3584,7 +3606,7 @@ void java_lang_invoke_DirectMethodHandle::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_DirectMethodHandle::serialize(SerializeClosure* f) {
void java_lang_invoke_DirectMethodHandle::serialize_offsets(SerializeClosure* f) {
DIRECTMETHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3616,7 +3638,7 @@ void java_lang_invoke_MethodHandle::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_MethodHandle::serialize(SerializeClosure* f) {
void java_lang_invoke_MethodHandle::serialize_offsets(SerializeClosure* f) {
METHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3635,7 +3657,7 @@ void java_lang_invoke_MemberName::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_MemberName::serialize(SerializeClosure* f) {
void java_lang_invoke_MemberName::serialize_offsets(SerializeClosure* f) {
MEMBERNAME_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
}
@ -3648,7 +3670,7 @@ void java_lang_invoke_ResolvedMethodName::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_ResolvedMethodName::serialize(SerializeClosure* f) {
void java_lang_invoke_ResolvedMethodName::serialize_offsets(SerializeClosure* f) {
RESOLVEDMETHOD_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3663,7 +3685,7 @@ void java_lang_invoke_LambdaForm::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_LambdaForm::serialize(SerializeClosure* f) {
void java_lang_invoke_LambdaForm::serialize_offsets(SerializeClosure* f) {
LAMBDAFORM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3785,7 +3807,7 @@ oop java_lang_invoke_ResolvedMethodName::find_resolved_method(const methodHandle
}
oop new_resolved_method = k->allocate_instance(CHECK_NULL);
new_resolved_method->address_field_put(_vmtarget_offset, (address)m());
// Add a reference to the loader (actually mirror because anonymous classes will not have
// Add a reference to the loader (actually mirror because unsafe anonymous classes will not have
// distinct loaders) to ensure the metadata is kept alive.
// This mirror may be different than the one in clazz field.
new_resolved_method->obj_field_put(_vmholder_offset, m->method_holder()->java_mirror());
@ -3815,7 +3837,7 @@ void java_lang_invoke_MethodType::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_MethodType::serialize(SerializeClosure* f) {
void java_lang_invoke_MethodType::serialize_offsets(SerializeClosure* f) {
METHODTYPE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3909,7 +3931,7 @@ void java_lang_invoke_CallSite::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_CallSite::serialize(SerializeClosure* f) {
void java_lang_invoke_CallSite::serialize_offsets(SerializeClosure* f) {
CALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3931,7 +3953,7 @@ void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(SerializeClosure* f) {
void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize_offsets(SerializeClosure* f) {
CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
}
#endif
@ -3963,7 +3985,7 @@ void java_security_AccessControlContext::compute_offsets() {
}
#if INCLUDE_CDS
void java_security_AccessControlContext::serialize(SerializeClosure* f) {
void java_security_AccessControlContext::serialize_offsets(SerializeClosure* f) {
ACCESSCONTROLCONTEXT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -4006,6 +4028,11 @@ ClassLoaderData* java_lang_ClassLoader::loader_data(oop loader) {
return HeapAccess<>::load_at(loader, _loader_data_offset);
}
ClassLoaderData* java_lang_ClassLoader::loader_data_raw(oop loader) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
return RawAccess<>::load_at(loader, _loader_data_offset);
}
ClassLoaderData* java_lang_ClassLoader::cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data) {
assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
return HeapAccess<>::atomic_cmpxchg_at(new_data, loader, _loader_data_offset, expected_data);
@ -4029,7 +4056,7 @@ void java_lang_ClassLoader::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_ClassLoader::serialize(SerializeClosure* f) {
void java_lang_ClassLoader::serialize_offsets(SerializeClosure* f) {
CLASSLOADER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
CLASSLOADER_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
}
@ -4143,7 +4170,7 @@ void java_lang_System::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_System::serialize(SerializeClosure* f) {
void java_lang_System::serialize_offsets(SerializeClosure* f) {
SYSTEM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -4247,15 +4274,7 @@ int java_nio_Buffer::_limit_offset;
int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset;
int reflect_ConstantPool::_oop_offset;
int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset;
int jdk_internal_module_ArchivedModuleGraph::_archivedConfiguration_offset;
int java_lang_Integer_IntegerCache::_archivedCache_offset;
int java_lang_module_Configuration::_EMPTY_CONFIGURATION_offset;
int java_util_ImmutableCollections_ListN::_EMPTY_LIST_offset;
int java_util_ImmutableCollections_SetN::_EMPTY_SET_offset;
int java_util_ImmutableCollections_MapN::_EMPTY_MAP_offset;
#define STACKTRACEELEMENT_FIELDS_DO(macro) \
macro(declaringClassObject_offset, k, "declaringClassObject", class_signature, false); \
@ -4274,7 +4293,7 @@ void java_lang_StackTraceElement::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_StackTraceElement::serialize(SerializeClosure* f) {
void java_lang_StackTraceElement::serialize_offsets(SerializeClosure* f) {
STACKTRACEELEMENT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -4349,7 +4368,7 @@ void java_lang_AssertionStatusDirectives::compute_offsets() {
}
#if INCLUDE_CDS
void java_lang_AssertionStatusDirectives::serialize(SerializeClosure* f) {
void java_lang_AssertionStatusDirectives::serialize_offsets(SerializeClosure* f) {
ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -4390,7 +4409,7 @@ void java_nio_Buffer::compute_offsets() {
}
#if INCLUDE_CDS
void java_nio_Buffer::serialize(SerializeClosure* f) {
void java_nio_Buffer::serialize_offsets(SerializeClosure* f) {
BUFFER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -4409,7 +4428,7 @@ oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(
}
#if INCLUDE_CDS
void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(SerializeClosure* f) {
void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize_offsets(SerializeClosure* f) {
AOS_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
@ -4418,99 +4437,6 @@ static int member_offset(int hardcoded_offset) {
return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
}
#define INTEGERCACHE_FIELDS_DO(macro) \
macro(_archivedCache_offset, k, "archivedCache", java_lang_Integer_array_signature, true)
void java_lang_Integer_IntegerCache::compute_offsets() {
InstanceKlass* k = SystemDictionary::Integer_IntegerCache_klass();
assert(k != NULL, "must be loaded");
INTEGERCACHE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_lang_Integer_IntegerCache::serialize(SerializeClosure* f) {
INTEGERCACHE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
#define ARCHIVEDMODULEGRAPH_FIELDS_DO(macro) \
macro(_archivedSystemModules_offset, k, "archivedSystemModules", systemModules_signature, true); \
macro(_archivedModuleFinder_offset, k, "archivedModuleFinder", moduleFinder_signature, true); \
macro(_archivedMainModule_offset, k, "archivedMainModule", string_signature, true); \
macro(_archivedConfiguration_offset, k, "archivedConfiguration", configuration_signature, true)
void jdk_internal_module_ArchivedModuleGraph::compute_offsets() {
InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass();
assert(k != NULL, "must be loaded");
ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void jdk_internal_module_ArchivedModuleGraph::serialize(SerializeClosure* f) {
ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
#define CONFIGURATION_FIELDS_DO(macro) \
macro(_EMPTY_CONFIGURATION_offset, k, "EMPTY_CONFIGURATION", configuration_signature, true)
void java_lang_module_Configuration::compute_offsets() {
InstanceKlass* k = SystemDictionary::Configuration_klass();
assert(k != NULL, "must be loaded");
CONFIGURATION_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_lang_module_Configuration::serialize(SerializeClosure* f) {
CONFIGURATION_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
#define LISTN_FIELDS_DO(macro) \
macro(_EMPTY_LIST_offset, k, "EMPTY_LIST", list_signature, true)
void java_util_ImmutableCollections_ListN::compute_offsets() {
InstanceKlass* k = SystemDictionary::ImmutableCollections_ListN_klass();
assert(k != NULL, "must be loaded");
LISTN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_util_ImmutableCollections_ListN::serialize(SerializeClosure* f) {
LISTN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
#define SETN_FIELDS_DO(macro) \
macro(_EMPTY_SET_offset, k, "EMPTY_SET", set_signature, true)
void java_util_ImmutableCollections_SetN::compute_offsets() {
InstanceKlass* k = SystemDictionary::ImmutableCollections_SetN_klass();
assert(k != NULL, "must be loaded");
SETN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_util_ImmutableCollections_SetN::serialize(SerializeClosure* f) {
SETN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
#define MAPN_FIELDS_DO(macro) \
macro(_EMPTY_MAP_offset, k, "EMPTY_MAP", map_signature, true)
void java_util_ImmutableCollections_MapN::compute_offsets() {
InstanceKlass* k = SystemDictionary::ImmutableCollections_MapN_klass();
assert(k != NULL, "must be loaded");
MAPN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
}
#if INCLUDE_CDS
void java_util_ImmutableCollections_MapN::serialize(SerializeClosure* f) {
MAPN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
}
#endif
// Compute hard-coded offsets
// Invoked before SystemDictionary::initialize, so pre-loaded classes
// are not available to determine the offset_of_static_fields.
@ -4527,6 +4453,7 @@ void JavaClasses::compute_hard_coded_offsets() {
java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset);
}
#define DO_COMPUTE_OFFSETS(k) k::compute_offsets();
// Compute non-hard-coded field offsets of all the classes in this file
void JavaClasses::compute_offsets() {
@ -4534,52 +4461,24 @@ void JavaClasses::compute_offsets() {
return; // field offsets are loaded from archive
}
// java_lang_Class::compute_offsets was called earlier in bootstrap
java_lang_System::compute_offsets();
java_lang_ClassLoader::compute_offsets();
java_lang_Throwable::compute_offsets();
java_lang_Thread::compute_offsets();
java_lang_ThreadGroup::compute_offsets();
java_lang_AssertionStatusDirectives::compute_offsets();
java_lang_ref_SoftReference::compute_offsets();
java_lang_invoke_MethodHandle::compute_offsets();
java_lang_invoke_DirectMethodHandle::compute_offsets();
java_lang_invoke_MemberName::compute_offsets();
java_lang_invoke_ResolvedMethodName::compute_offsets();
java_lang_invoke_LambdaForm::compute_offsets();
java_lang_invoke_MethodType::compute_offsets();
java_lang_invoke_CallSite::compute_offsets();
java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets();
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
// changed with the new reflection implementation in JDK 1.4, and
// since the Universe doesn't know what JDK version it is until this
// point we defer computation of these offsets until now.
java_lang_reflect_AccessibleObject::compute_offsets();
java_lang_reflect_Method::compute_offsets();
java_lang_reflect_Constructor::compute_offsets();
java_lang_reflect_Field::compute_offsets();
java_nio_Buffer::compute_offsets();
reflect_ConstantPool::compute_offsets();
reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
java_lang_reflect_Parameter::compute_offsets();
java_lang_Module::compute_offsets();
java_lang_StackTraceElement::compute_offsets();
java_lang_StackFrameInfo::compute_offsets();
java_lang_LiveStackFrameInfo::compute_offsets();
java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets();
java_lang_Integer_IntegerCache::compute_offsets();
java_lang_module_Configuration::compute_offsets();
java_util_ImmutableCollections_ListN::compute_offsets();
java_util_ImmutableCollections_MapN::compute_offsets();
java_util_ImmutableCollections_SetN::compute_offsets();
jdk_internal_module_ArchivedModuleGraph::compute_offsets();
// We have already called the compute_offsets() of the
// BASIC_JAVA_CLASSES_DO_PART1 classes (java_lang_String and java_lang_Class)
// earlier inside SystemDictionary::resolve_preloaded_classes()
BASIC_JAVA_CLASSES_DO_PART2(DO_COMPUTE_OFFSETS);
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();
}
#if INCLUDE_CDS
#define DO_SERIALIZE_OFFSETS(k) k::serialize_offsets(soc);
void JavaClasses::serialize_offsets(SerializeClosure* soc) {
BASIC_JAVA_CLASSES_DO(DO_SERIALIZE_OFFSETS);
}
#endif
#ifndef PRODUCT
// These functions exist to assert the validity of hard-coded field offsets to guard

View File

@ -47,6 +47,46 @@
// correspondingly. The names in the enums must be identical to the actual field
// names in order for the verification code to work.
#define BASIC_JAVA_CLASSES_DO_PART1(f) \
f(java_lang_Class) \
f(java_lang_String) \
//end
#define BASIC_JAVA_CLASSES_DO_PART2(f) \
f(java_lang_System) \
f(java_lang_ClassLoader) \
f(java_lang_Throwable) \
f(java_lang_Thread) \
f(java_lang_ThreadGroup) \
f(java_lang_AssertionStatusDirectives) \
f(java_lang_ref_SoftReference) \
f(java_lang_invoke_MethodHandle) \
f(java_lang_invoke_DirectMethodHandle) \
f(java_lang_invoke_MemberName) \
f(java_lang_invoke_ResolvedMethodName) \
f(java_lang_invoke_LambdaForm) \
f(java_lang_invoke_MethodType) \
f(java_lang_invoke_CallSite) \
f(java_lang_invoke_MethodHandleNatives_CallSiteContext) \
f(java_security_AccessControlContext) \
f(java_lang_reflect_AccessibleObject) \
f(java_lang_reflect_Method) \
f(java_lang_reflect_Constructor) \
f(java_lang_reflect_Field) \
f(java_nio_Buffer) \
f(reflect_ConstantPool) \
f(reflect_UnsafeStaticFieldAccessorImpl) \
f(java_lang_reflect_Parameter) \
f(java_lang_Module) \
f(java_lang_StackTraceElement) \
f(java_lang_StackFrameInfo) \
f(java_lang_LiveStackFrameInfo) \
f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
//end
#define BASIC_JAVA_CLASSES_DO(f) \
BASIC_JAVA_CLASSES_DO_PART1(f) \
BASIC_JAVA_CLASSES_DO_PART2(f)
// Interface to java.lang.String objects
@ -71,7 +111,7 @@ class java_lang_String : AllStatic {
};
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Instance creation
static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
@ -224,7 +264,7 @@ class java_lang_Class : AllStatic {
static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
// Archiving
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
static void archive_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static oop archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
static oop process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
@ -237,6 +277,7 @@ class java_lang_Class : AllStatic {
// Conversion
static Klass* as_Klass(oop java_class);
static Klass* as_Klass_raw(oop java_class);
static void set_klass(oop java_class, Klass* klass);
static BasicType as_BasicType(oop java_class, Klass** reference_klass = NULL);
static Symbol* as_signature(oop java_class, bool intern_if_not_found, TRAPS);
@ -270,8 +311,10 @@ class java_lang_Class : AllStatic {
static oop module(oop java_class);
static int oop_size(oop java_class);
static int oop_size_raw(oop java_class);
static void set_oop_size(HeapWord* java_class, int size);
static int static_oop_field_count(oop java_class);
static int static_oop_field_count_raw(oop java_class);
static void set_static_oop_field_count(oop java_class, int size);
static GrowableArray<Klass*>* fixup_mirror_list() {
@ -317,7 +360,7 @@ class java_lang_Thread : AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Instance creation
static oop create();
@ -419,7 +462,7 @@ class java_lang_ThreadGroup : AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// parent ThreadGroup
static oop parent(oop java_thread_group);
@ -500,7 +543,7 @@ class java_lang_Throwable: AllStatic {
static void print_stack_usage(Handle stream);
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocate space for backtrace (created but stack trace not filled in)
static void allocate_backtrace(Handle throwable, TRAPS);
@ -531,7 +574,7 @@ class java_lang_reflect_AccessibleObject: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static jboolean override(oop reflect);
@ -564,7 +607,7 @@ class java_lang_reflect_Method : public java_lang_reflect_AccessibleObject {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocation
static Handle create(TRAPS);
@ -635,7 +678,7 @@ class java_lang_reflect_Constructor : public java_lang_reflect_AccessibleObject
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocation
static Handle create(TRAPS);
@ -695,7 +738,7 @@ class java_lang_reflect_Field : public java_lang_reflect_AccessibleObject {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocation
static Handle create(TRAPS);
@ -752,7 +795,7 @@ class java_lang_reflect_Parameter {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocation
static Handle create(TRAPS);
@ -784,7 +827,7 @@ class java_lang_Module {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocation
static Handle create(Handle loader, Handle module_name, TRAPS);
@ -815,7 +858,7 @@ class reflect_ConstantPool {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Allocation
static Handle create(TRAPS);
@ -839,7 +882,7 @@ class reflect_UnsafeStaticFieldAccessorImpl {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
static int base_offset() {
return _base_offset;
@ -944,7 +987,7 @@ class java_lang_ref_SoftReference: public java_lang_ref_Reference {
static void set_clock(jlong value);
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
};
// Interface to java.lang.invoke.MethodHandle objects
@ -961,7 +1004,7 @@ class java_lang_invoke_MethodHandle: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop type(oop mh);
@ -992,7 +1035,7 @@ class java_lang_invoke_DirectMethodHandle: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop member(oop mh);
@ -1019,7 +1062,7 @@ class java_lang_invoke_LambdaForm: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop vmentry(oop lform);
@ -1052,7 +1095,7 @@ class java_lang_invoke_ResolvedMethodName : AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
static int vmtarget_offset_in_bytes() { return _vmtarget_offset; }
@ -1091,7 +1134,7 @@ class java_lang_invoke_MemberName: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop clazz(oop mname);
static void set_clazz(oop mname, oop clazz);
@ -1156,7 +1199,7 @@ class java_lang_invoke_MethodType: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop rtype(oop mt);
static objArrayOop ptypes(oop mt);
@ -1192,7 +1235,7 @@ private:
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static oop target( oop site);
static void set_target( oop site, oop target);
@ -1226,7 +1269,7 @@ private:
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Accessors
static DependencyContext vmdependencies(oop context);
@ -1250,7 +1293,7 @@ class java_security_AccessControlContext: AllStatic {
static void compute_offsets();
public:
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);
static bool is_authorized(Handle context);
@ -1277,9 +1320,10 @@ class java_lang_ClassLoader : AllStatic {
public:
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
static ClassLoaderData* loader_data(oop loader);
static ClassLoaderData* loader_data_raw(oop loader);
static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data);
static oop parent(oop loader);
@ -1330,7 +1374,7 @@ class java_lang_System : AllStatic {
static bool has_security_manager();
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@ -1368,7 +1412,7 @@ class java_lang_StackTraceElement: AllStatic {
int version, int bci, Symbol* name, TRAPS);
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@ -1412,7 +1456,7 @@ public:
static void set_version(oop info, short value);
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
static void to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS);
@ -1434,7 +1478,7 @@ class java_lang_LiveStackFrameInfo: AllStatic {
static void set_mode(oop info, int value);
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@ -1459,7 +1503,7 @@ class java_lang_AssertionStatusDirectives: AllStatic {
static void set_deflt(oop obj, bool val);
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
// Debugging
friend class JavaClasses;
@ -1473,7 +1517,7 @@ class java_nio_Buffer: AllStatic {
public:
static int limit_offset();
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
@ -1482,67 +1526,7 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
public:
static void compute_offsets();
static oop get_owner_threadObj(oop obj);
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_lang_Integer_IntegerCache: AllStatic {
private:
static int _archivedCache_offset;
public:
static int archivedCache_offset() { return _archivedCache_offset; }
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class jdk_internal_module_ArchivedModuleGraph: AllStatic {
private:
static int _archivedSystemModules_offset;
static int _archivedModuleFinder_offset;
static int _archivedMainModule_offset;
static int _archivedConfiguration_offset;
public:
static int archivedSystemModules_offset() { return _archivedSystemModules_offset; }
static int archivedModuleFinder_offset() { return _archivedModuleFinder_offset; }
static int archivedMainModule_offset() { return _archivedMainModule_offset; }
static int archivedConfiguration_offset() { return _archivedConfiguration_offset; }
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_lang_module_Configuration: AllStatic {
private:
static int _EMPTY_CONFIGURATION_offset;
public:
static int EMPTY_CONFIGURATION_offset() { return _EMPTY_CONFIGURATION_offset; }
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_util_ImmutableCollections_ListN : AllStatic {
private:
static int _EMPTY_LIST_offset;
public:
static int EMPTY_LIST_offset() { return _EMPTY_LIST_offset; }
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_util_ImmutableCollections_SetN : AllStatic {
private:
static int _EMPTY_SET_offset;
public:
static int EMPTY_SET_offset() { return _EMPTY_SET_offset; }
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
};
class java_util_ImmutableCollections_MapN : AllStatic {
private:
static int _EMPTY_MAP_offset;
public:
static int EMPTY_MAP_offset() { return _EMPTY_MAP_offset; }
static void compute_offsets();
static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
};
// Use to declare fields that need to be injected into Java classes
@ -1605,7 +1589,7 @@ class JavaClasses : AllStatic {
static void compute_hard_coded_offsets();
static void compute_offsets();
static void check_offsets() PRODUCT_RETURN;
static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN;
static InjectedField* get_injected(Symbol* class_name, int* field_count);
};

View File

@ -183,7 +183,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const InstanceKlass* host_klass,
const InstanceKlass* unsafe_anonymous_host,
GrowableArray<Handle>* cp_patches,
TRAPS) {
assert(stream != NULL, "invariant");
@ -201,7 +201,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
THREAD->statistical_info().incr_define_class_count();
// Skip this processing for VM anonymous classes
if (host_klass == NULL) {
if (unsafe_anonymous_host == NULL) {
stream = check_class_file_load_hook(stream,
name,
loader_data,
@ -214,7 +214,7 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream,
name,
loader_data,
protection_domain,
host_klass,
unsafe_anonymous_host,
cp_patches,
ClassFileParser::BROADCAST, // publicity level
CHECK_NULL);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,7 @@ class KlassFactory : AllStatic {
Symbol* name,
ClassLoaderData* loader_data,
Handle protection_domain,
const InstanceKlass* host_klass,
const InstanceKlass* unsafe_anonymous_host,
GrowableArray<Handle>* cp_patches,
TRAPS);
public:

View File

@ -110,7 +110,7 @@ public:
ClassLoaderData* loader_data() const { return _loader_data; }
void set_loader_data(ClassLoaderData* cld) {
assert(!cld->is_anonymous(), "Unexpected anonymous class loader data");
assert(!cld->is_unsafe_anonymous(), "Unexpected unsafe anonymous class loader data");
_loader_data = cld;
}

View File

@ -65,9 +65,10 @@ void ResolutionErrorEntry::set_error(Symbol* e) {
}
void ResolutionErrorEntry::set_message(Symbol* c) {
assert(c != NULL, "must set a value");
_message = c;
_message->increment_refcount();
if (_message != NULL) {
_message->increment_refcount();
}
}
// create new error entry
@ -87,7 +88,9 @@ void ResolutionErrorTable::free_entry(ResolutionErrorEntry *entry) {
// decrement error refcount
assert(entry->error() != NULL, "error should be set");
entry->error()->decrement_refcount();
entry->message()->decrement_refcount();
if (entry->message() != NULL) {
entry->message()->decrement_refcount();
}
Hashtable<ConstantPool*, mtClass>::free_entry(entry);
}

View File

@ -115,10 +115,15 @@ bool SharedPathsMiscInfo::check() {
return fail("Corrupted archive file header");
}
jshort cur_index = 0;
jshort max_cp_index = FileMapInfo::current_info()->header()->max_used_path_index();
jshort module_paths_start_index =
FileMapInfo::current_info()->header()->app_module_paths_start_index();
while (_cur_ptr < _end_ptr) {
jint type;
const char* path = _cur_ptr;
_cur_ptr += strlen(path) + 1;
if (!read_jint(&type)) {
return fail("Corrupted archive file header");
}
@ -129,13 +134,19 @@ bool SharedPathsMiscInfo::check() {
print_path(&ls, type, path);
ls.cr();
}
if (!check(type, path)) {
if (!PrintSharedArchiveAndExit) {
return false;
// skip checking the class path(s) which was not referenced during CDS dump
if ((cur_index <= max_cp_index) || (cur_index >= module_paths_start_index)) {
if (!check(type, path)) {
if (!PrintSharedArchiveAndExit) {
return false;
}
} else {
ClassLoader::trace_class_path("ok");
}
} else {
ClassLoader::trace_class_path("ok");
ClassLoader::trace_class_path("skipped check");
}
cur_index++;
}
return true;

View File

@ -64,9 +64,9 @@
// --------------------------------------------------------------------------
StringTable* StringTable::_the_table = NULL;
bool StringTable::_shared_string_mapped = false;
CompactHashtable<oop, char> StringTable::_shared_table;
bool StringTable::_alt_hash = false;
volatile bool StringTable::_shared_string_mapped = false;
volatile bool StringTable::_alt_hash = false;
static juint murmur_seed = 0;
@ -176,18 +176,18 @@ class StringTableLookupOop : public StackObj {
}
};
static size_t ceil_pow_2(uintx val) {
static size_t ceil_log2(size_t val) {
size_t ret;
for (ret = 1; ((size_t)1 << ret) < val; ++ret);
return ret;
}
StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
_needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) {
_needs_rehashing(false), _weak_handles(NULL), _items_count(0), _uncleaned_items_count(0) {
_weak_handles = new OopStorage("StringTable weak",
StringTableWeakAlloc_lock,
StringTableWeakActive_lock);
size_t start_size_log_2 = ceil_pow_2(StringTableSize);
size_t start_size_log_2 = ceil_log2(StringTableSize);
_current_size = ((size_t)1) << start_size_log_2;
log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
_current_size, start_size_log_2);
@ -195,32 +195,31 @@ StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
}
size_t StringTable::item_added() {
return Atomic::add((size_t)1, &(the_table()->_items));
return Atomic::add((size_t)1, &(the_table()->_items_count));
}
size_t StringTable::add_items_to_clean(size_t ndead) {
size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items));
size_t StringTable::add_items_count_to_clean(size_t ndead) {
size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items_count));
log_trace(stringtable)(
"Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
the_table()->_uncleaned_items, ndead, total);
the_table()->_uncleaned_items_count, ndead, total);
return total;
}
void StringTable::item_removed() {
Atomic::add((size_t)-1, &(the_table()->_items));
Atomic::add((size_t)-1, &(the_table()->_items_count));
}
double StringTable::get_load_factor() {
return (_items*1.0)/_current_size;
return (double)_items_count/_current_size;
}
double StringTable::get_dead_factor() {
return (_uncleaned_items*1.0)/_current_size;
return (double)_uncleaned_items_count/_current_size;
}
size_t StringTable::table_size(Thread* thread) {
return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread
: Thread::current());
size_t StringTable::table_size() {
return ((size_t)1) << _local_table->get_size_log2(Thread::current());
}
void StringTable::trigger_concurrent_work() {
@ -406,7 +405,7 @@ void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f,
// This is the serial case without ParState.
// Just set the correct number and check for a cleaning phase.
the_table()->_uncleaned_items = stiac._count;
the_table()->_uncleaned_items_count = stiac._count;
StringTable::the_table()->check_concurrent_work();
if (processed != NULL) {
@ -433,7 +432,7 @@ void StringTable::possibly_parallel_unlink(
_par_state_string->weak_oops_do(&stiac, &dnc);
// Accumulate the dead strings.
the_table()->add_items_to_clean(stiac._count);
the_table()->add_items_count_to_clean(stiac._count);
*processed = (int) stiac._count_total;
*removed = (int) stiac._count;
@ -465,7 +464,7 @@ void StringTable::grow(JavaThread* jt) {
}
}
gt.done(jt);
_current_size = table_size(jt);
_current_size = table_size();
log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size);
}
@ -843,7 +842,7 @@ void StringTable::write_to_archive() {
assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
_shared_table.reset();
int num_buckets = the_table()->_items / SharedSymbolTableBucketSize;
int num_buckets = the_table()->_items_count / SharedSymbolTableBucketSize;
// calculation of num_buckets can result in zero buckets, we need at least one
CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
&MetaspaceShared::stats()->string);

View File

@ -58,21 +58,22 @@ private:
static StringTable* _the_table;
// Shared string table
static CompactHashtable<oop, char> _shared_table;
static bool _shared_string_mapped;
static bool _alt_hash;
static volatile bool _shared_string_mapped;
static volatile bool _alt_hash;
private:
// Set if one bucket is out of balance due to hash algorithm deficiency
StringTableHash* _local_table;
size_t _current_size;
volatile bool _has_work;
// Set if one bucket is out of balance due to hash algorithm deficiency
volatile bool _needs_rehashing;
OopStorage* _weak_handles;
volatile size_t _items;
volatile size_t _items_count;
DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
volatile size_t _uncleaned_items;
volatile size_t _uncleaned_items_count;
DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
double get_load_factor();
@ -83,7 +84,7 @@ private:
static size_t item_added();
static void item_removed();
size_t add_items_to_clean(size_t ndead);
size_t add_items_count_to_clean(size_t ndead);
StringTable();
@ -100,7 +101,7 @@ private:
public:
// The string table
static StringTable* the_table() { return _the_table; }
size_t table_size(Thread* thread = NULL);
size_t table_size();
static OopStorage* weak_storage() { return the_table()->_weak_handles; }
@ -116,7 +117,7 @@ private:
// Must be called before a parallel walk where strings might die.
static void reset_dead_counter() {
the_table()->_uncleaned_items = 0;
the_table()->_uncleaned_items_count = 0;
}
// After the parallel walk this method must be called to trigger
// cleaning. Note it might trigger a resize instead.
@ -127,7 +128,7 @@ private:
// If GC uses ParState directly it should add the number of cleared
// strings to this method.
static void inc_dead_counter(size_t ndead) {
the_table()->add_items_to_clean(ndead);
the_table()->add_items_count_to_clean(ndead);
}
// Delete pointers to otherwise-unreachable objects.

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,23 +26,11 @@
#define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "oops/symbol.hpp"
#include "utilities/concurrentHashTable.hpp"
#include "utilities/hashtable.hpp"
// The symbol table holds all Symbol*s and corresponding interned strings.
// Symbol*s and literal strings should be canonicalized.
//
// The interned strings are created lazily.
//
// It is implemented as an open hash table with a fixed number of buckets.
//
// %note:
// - symbolTableEntrys are allocated in blocks to reduce the space overhead.
class BoolObjectClosure;
class outputStream;
class SerializeClosure;
// TempNewSymbol acts as a handle class in a handle/body idiom and is
// responsible for proper resource management of the body (which is a Symbol*).
// The body is resource managed by a reference counting scheme.
@ -59,7 +47,7 @@ class SerializeClosure;
class TempNewSymbol : public StackObj {
Symbol* _temp;
public:
public:
TempNewSymbol() : _temp(NULL) {}
// Conversion from a Symbol* to a TempNewSymbol.
@ -97,35 +85,69 @@ class TempNewSymbol : public StackObj {
};
template <class T, class N> class CompactHashtable;
class CompactSymbolTableWriter;
class SerializeClosure;
class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
class SymbolTableConfig;
typedef ConcurrentHashTable<Symbol*,
SymbolTableConfig, mtSymbol> SymbolTableHash;
class SymbolTableCreateEntry;
class SymbolTable : public CHeapObj<mtSymbol> {
friend class VMStructs;
friend class Symbol;
friend class ClassFileParser;
friend class SymbolTableConfig;
friend class SymbolTableCreateEntry;
private:
static void delete_symbol(Symbol* sym);
void grow(JavaThread* jt);
void clean_dead_entries(JavaThread* jt);
// The symbol table
static SymbolTable* _the_table;
// Set if one bucket is out of balance due to hash algorithm deficiency
static bool _needs_rehashing;
static bool _lookup_shared_first;
// Shared symbol table.
static CompactHashtable<Symbol*, char> _shared_table;
static volatile bool _lookup_shared_first;
static volatile bool _alt_hash;
// For statistics
static int _symbols_removed;
static int _symbols_counted;
volatile size_t _symbols_removed;
volatile size_t _symbols_counted;
// shared symbol table.
static CompactHashtable<Symbol*, char> _shared_table;
SymbolTableHash* _local_table;
size_t _current_size;
volatile bool _has_work;
// Set if one bucket is out of balance due to hash algorithm deficiency
volatile bool _needs_rehashing;
Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
volatile size_t _items_count;
volatile size_t _uncleaned_items_count;
double get_load_factor();
double get_dead_factor();
void check_concurrent_work();
void trigger_concurrent_work();
static void item_added();
static void item_removed();
static void set_item_clean_count(size_t ncl);
static void mark_item_clean_count();
SymbolTable();
Symbol* allocate_symbol(const char* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
Symbol* do_lookup(const char* name, int len, uintx hash);
Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS);
// Adding elements
Symbol* basic_add(int index, u1* name, int len, unsigned int hashValue,
bool c_heap, TRAPS);
bool basic_add(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** names, int* lengths, int* cp_indices,
unsigned int* hashValues, TRAPS);
static void add(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** names, int* lengths, int* cp_indices,
unsigned int* hashValues, TRAPS);
static void new_symbols(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
@ -136,15 +158,8 @@ private:
}
static Symbol* lookup_shared(const char* name, int len, unsigned int hash);
Symbol* lookup_dynamic(int index, const char* name, int len, unsigned int hash);
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
: RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
: RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
number_of_entries) {}
Symbol* lookup_dynamic(const char* name, int len, unsigned int hash);
Symbol* lookup_common(const char* name, int len, unsigned int hash);
// Arena for permanent symbols (null class loader) that are never unloaded
static Arena* _arena;
@ -152,88 +167,45 @@ private:
static void initialize_symbols(int arena_alloc_size = 0);
static volatile int _parallel_claimed_idx;
void concurrent_work(JavaThread* jt);
void print_table_statistics(outputStream* st, const char* table_name);
void try_rehash_table();
bool do_rehash();
typedef SymbolTable::BucketUnlinkContext BucketUnlinkContext;
// Release any dead symbols. Unlinked bucket entries are collected in the given
// context to be freed later.
// This allows multiple threads to work on the table at once.
static void buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context);
public:
// The symbol table
static SymbolTable* the_table() { return _the_table; }
size_t table_size();
enum {
symbol_alloc_batch_size = 8,
// Pick initial size based on java -version size measurements
symbol_alloc_arena_size = 360*K
symbol_alloc_arena_size = 360*K // TODO (revisit)
};
// The symbol table
static SymbolTable* the_table() { return _the_table; }
// Size of one bucket in the string table. Used when checking for rollover.
static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
static void create_table() {
assert(_the_table == NULL, "One symbol table allowed.");
_the_table = new SymbolTable();
initialize_symbols(symbol_alloc_arena_size);
}
static unsigned int hash_symbol(const char* s, int len);
static unsigned int hash_shared_symbol(const char* s, int len);
static void unlink() {
do_check_concurrent_work();
}
static void do_check_concurrent_work();
static void do_concurrent_work(JavaThread* jt);
static bool has_work() { return the_table()->_has_work; }
// Probing
static Symbol* lookup(const char* name, int len, TRAPS);
// lookup only, won't add. Also calculate hash.
static Symbol* lookup_only(const char* name, int len, unsigned int& hash);
// Only copy to C string to be added if lookup failed.
// adds new symbol if not found
static Symbol* lookup(const Symbol* sym, int begin, int end, TRAPS);
static void release(Symbol* sym);
// Look up the address of the literal in the SymbolTable for this Symbol*
static Symbol** lookup_symbol_addr(Symbol* sym);
// jchar (UTF16) version of lookups
static Symbol* lookup_unicode(const jchar* name, int len, TRAPS);
static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash);
static void add(ClassLoaderData* loader_data,
const constantPoolHandle& cp, int names_count,
const char** names, int* lengths, int* cp_indices,
unsigned int* hashValues, TRAPS);
// Release any dead symbols
static void unlink() {
int processed = 0;
int removed = 0;
unlink(&processed, &removed);
}
static void unlink(int* processed, int* removed);
// Release any dead symbols, possibly parallel version
static void possibly_parallel_unlink(int* processed, int* removed);
// iterate over symbols
static void symbols_do(SymbolClosure *cl);
static void metaspace_pointers_do(MetaspaceClosure* it);
// Symbol creation
static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
assert(utf8_buffer != NULL, "just checking");
return lookup(utf8_buffer, length, THREAD);
}
static Symbol* new_symbol(const char* name, TRAPS) {
return new_symbol(name, (int)strlen(name), THREAD);
}
static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
assert(begin <= end && end <= sym->utf8_length(), "just checking");
return lookup(sym, begin, end, THREAD);
}
// Create a symbol in the arena for symbols that are not deleted
static Symbol* new_permanent_symbol(const char* name, TRAPS);
// Symbol lookup
static Symbol* lookup(int index, const char* name, int len, TRAPS);
// Needed for preloading classes in signatures when compiling.
// Returns the symbol is already present in symbol table, otherwise
// NULL. NO ALLOCATION IS GUARANTEED!
@ -246,27 +218,45 @@ public:
return lookup_only_unicode(name, len, ignore_hash);
}
// Histogram
static void print_histogram() PRODUCT_RETURN;
static void print() PRODUCT_RETURN;
// Symbol creation
static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) {
assert(utf8_buffer != NULL, "just checking");
return lookup(utf8_buffer, length, THREAD);
}
static Symbol* new_symbol(const char* name, TRAPS) {
return new_symbol(name, (int)strlen(name), THREAD);
}
static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) {
assert(begin <= end && end <= sym->utf8_length(), "just checking");
return lookup(sym, begin, end, THREAD);
}
// Create a symbol in the arena for symbols that are not deleted
static Symbol* new_permanent_symbol(const char* name, TRAPS);
// Debugging
static void verify();
static void dump(outputStream* st, bool verbose=false);
static void read(const char* filename, TRAPS);
// Rehash the string table if it gets out of balance
static void rehash_table();
static bool needs_rehashing()
{ return SymbolTable::the_table()->_needs_rehashing; }
// Heap dumper and CDS
static void symbols_do(SymbolClosure *cl);
// Sharing
static void write_to_archive();
static void serialize(SerializeClosure* soc);
static u4 encode_shared(Symbol* sym);
static Symbol* decode_shared(u4 offset);
private:
static void copy_shared_symbol_table(CompactSymbolTableWriter* ch_table);
public:
static void write_to_archive() NOT_CDS_RETURN;
static void serialize(SerializeClosure* soc) NOT_CDS_RETURN;
static void metaspace_pointers_do(MetaspaceClosure* it);
// Rehash the symbol table if it gets out of balance
static void rehash_table();
static bool needs_rehashing() { return _needs_rehashing; }
// Parallel chunked scanning
static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
static int parallel_claimed_index() { return _parallel_claimed_idx; }
// Jcmd
static void dump(outputStream* st, bool verbose=false);
// Debugging
static void verify();
static void read(const char* filename, TRAPS);
// Histogram
static void print_histogram() PRODUCT_RETURN;
};
#endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP

View File

@ -988,18 +988,18 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
Handle class_loader,
Handle protection_domain,
ClassFileStream* st,
const InstanceKlass* host_klass,
const InstanceKlass* unsafe_anonymous_host,
GrowableArray<Handle>* cp_patches,
TRAPS) {
EventClassLoad class_load_start_event;
ClassLoaderData* loader_data;
if (host_klass != NULL) {
// Create a new CLD for anonymous class, that uses the same class loader
// as the host_klass
guarantee(oopDesc::equals(host_klass->class_loader(), class_loader()), "should be the same");
loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader);
if (unsafe_anonymous_host != NULL) {
// Create a new CLD for an unsafe anonymous class, that uses the same class loader
// as the unsafe_anonymous_host
guarantee(oopDesc::equals(unsafe_anonymous_host->class_loader(), class_loader()), "should be the same");
loader_data = ClassLoaderData::unsafe_anonymous_class_loader_data(class_loader);
} else {
loader_data = ClassLoaderData::class_loader_data(class_loader());
}
@ -1016,12 +1016,12 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
class_name,
loader_data,
protection_domain,
host_klass,
unsafe_anonymous_host,
cp_patches,
CHECK_NULL);
if (host_klass != NULL && k != NULL) {
// Anonymous classes must update ClassLoaderData holder (was host_klass loader)
if (unsafe_anonymous_host != NULL && k != NULL) {
// Unsafe anonymous classes must update ClassLoaderData holder (was unsafe_anonymous_host loader)
// so that they can be unloaded when the mirror is no longer referenced.
k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror()));
@ -1056,8 +1056,8 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
post_class_load_event(&class_load_start_event, k, loader_data);
}
}
assert(host_klass != NULL || NULL == cp_patches,
"cp_patches only found with host_klass");
assert(unsafe_anonymous_host != NULL || NULL == cp_patches,
"cp_patches only found with unsafe_anonymous_host");
return k;
}
@ -1115,7 +1115,7 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
class_name,
loader_data,
protection_domain,
NULL, // host_klass
NULL, // unsafe_anonymous_host
NULL, // cp_patches
CHECK_NULL);
}
@ -1160,10 +1160,12 @@ InstanceKlass* SystemDictionary::resolve_from_stream(Symbol* class_name,
#if INCLUDE_CDS
void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries) {
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
assert(length == _shared_dictionary_size * sizeof(HashtableBucket<mtClass>),
"bad shared dictionary size.");
_shared_dictionary = new Dictionary(ClassLoaderData::the_null_class_loader_data(),
_shared_dictionary_size, t, number_of_entries);
_shared_dictionary_size, t, number_of_entries,
false /* explicitly set _resizable to false */);
}
@ -1858,10 +1860,19 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer,
}
}
// TODO: just return if !unloading_occurred.
if (unloading_occurred) {
GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
{
GCTraceTime(Debug, gc, phases) t("SymbolTable", gc_timer);
// Check if there's work to do in the SymbolTable
SymbolTable::do_check_concurrent_work();
}
{
GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
}
{
@ -1874,7 +1885,7 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer,
if (do_cleaning) {
GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
ResolvedMethodTable::unlink();
ResolvedMethodTable::trigger_cleanup();
}
return unloading_occurred;
@ -3001,7 +3012,7 @@ class CombineDictionariesClosure : public CLDClosure {
_master_dictionary(master_dictionary) {}
void do_cld(ClassLoaderData* cld) {
ResourceMark rm;
if (cld->is_anonymous()) {
if (cld->is_unsafe_anonymous()) {
return;
}
if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {

View File

@ -187,11 +187,6 @@ class OopStorage;
do_klass(jdk_internal_loader_ClassLoaders_AppClassLoader_klass, jdk_internal_loader_ClassLoaders_AppClassLoader, Pre ) \
do_klass(jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass, jdk_internal_loader_ClassLoaders_PlatformClassLoader, Pre ) \
do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \
do_klass(Configuration_klass, java_lang_module_Configuration, Pre ) \
do_klass(ImmutableCollections_ListN_klass, java_util_ImmutableCollections_ListN, Pre ) \
do_klass(ImmutableCollections_MapN_klass, java_util_ImmutableCollections_MapN, Pre ) \
do_klass(ImmutableCollections_SetN_klass, java_util_ImmutableCollections_SetN, Pre ) \
do_klass(ArchivedModuleGraph_klass, jdk_internal_module_ArchivedModuleGraph, Pre ) \
\
do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
\
@ -215,7 +210,6 @@ class OopStorage;
do_klass(Byte_klass, java_lang_Byte, Pre ) \
do_klass(Short_klass, java_lang_Short, Pre ) \
do_klass(Integer_klass, java_lang_Integer, Pre ) \
do_klass(Integer_IntegerCache_klass, java_lang_Integer_IntegerCache, Pre ) \
do_klass(Long_klass, java_lang_Long, Pre ) \
\
/* JVMCI classes. These are loaded on-demand. */ \
@ -304,7 +298,7 @@ public:
class_loader,
protection_domain,
st,
NULL, // host klass
NULL, // unsafe_anonymous_host
NULL, // cp_patches
THREAD);
}
@ -312,7 +306,7 @@ public:
Handle class_loader,
Handle protection_domain,
ClassFileStream* st,
const InstanceKlass* host_klass,
const InstanceKlass* unsafe_anonymous_host,
GrowableArray<Handle>* cp_patches,
TRAPS);

View File

@ -755,11 +755,11 @@ bool SystemDictionaryShared::add_verification_constraint(InstanceKlass* k, Symbo
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
assert(DumpSharedSpaces, "called at dump time only");
// Skip anonymous classes, which are not archived as they are not in
// dictionary (see assert_no_anonymoys_classes_in_dictionaries() in
// Skip unsafe anonymous classes, which are not archived as they are not in
// dictionary (see assert_no_unsafe_anonymous_classes_in_dictionaries() in
// VM_PopulateDumpSharedSpace::doit()).
if (k->class_loader_data()->is_anonymous()) {
return true; // anonymous classes are not archived, skip
if (k->class_loader_data()->is_unsafe_anonymous()) {
return true; // unsafe anonymous classes are not archived, skip
}
SharedDictionaryEntry* entry = ((SharedDictionary*)(k->class_loader_data()->dictionary()))->find_entry_for(k);

View File

@ -293,9 +293,6 @@ public:
static void allocate_shared_data_arrays(int size, TRAPS);
static void oops_do(OopClosure* f);
static void roots_oops_do(OopClosure* f) {
oops_do(f);
}
// Check if sharing is supported for the class loader.
static bool is_sharing_possible(ClassLoaderData* loader_data);

View File

@ -2823,20 +2823,20 @@ void ClassVerifier::verify_invoke_instructions(
current_class()->super()->name()))) {
bool subtype = false;
bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref;
if (!current_class()->is_anonymous()) {
if (!current_class()->is_unsafe_anonymous()) {
subtype = ref_class_type.is_assignable_from(
current_type(), this, false, CHECK_VERIFY(this));
} else {
VerificationType host_klass_type =
VerificationType::reference_type(current_class()->host_klass()->name());
subtype = ref_class_type.is_assignable_from(host_klass_type, this, false, CHECK_VERIFY(this));
VerificationType unsafe_anonymous_host_type =
VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name());
subtype = ref_class_type.is_assignable_from(unsafe_anonymous_host_type, this, false, CHECK_VERIFY(this));
// If invokespecial of IMR, need to recheck for same or
// direct interface relative to the host class
have_imr_indirect = (have_imr_indirect &&
!is_same_or_direct_interface(
current_class()->host_klass(),
host_klass_type, ref_class_type));
current_class()->unsafe_anonymous_host(),
unsafe_anonymous_host_type, ref_class_type));
}
if (!subtype) {
verify_error(ErrorContext::bad_code(bci),
@ -2866,15 +2866,15 @@ void ClassVerifier::verify_invoke_instructions(
} else { // other methods
// Ensures that target class is assignable to method class.
if (opcode == Bytecodes::_invokespecial) {
if (!current_class()->is_anonymous()) {
if (!current_class()->is_unsafe_anonymous()) {
current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
} else {
// anonymous class invokespecial calls: check if the
// objectref is a subtype of the host_klass of the current class
// to allow an anonymous class to reference methods in the host_klass
// objectref is a subtype of the unsafe_anonymous_host of the current class
// to allow an anonymous class to reference methods in the unsafe_anonymous_host
VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
VerificationType hosttype =
VerificationType::reference_type(current_class()->host_klass()->name());
VerificationType::reference_type(current_class()->unsafe_anonymous_host()->name());
bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this));
if (!subtype) {
verify_error( ErrorContext::bad_type(current_frame->offset(),

View File

@ -124,7 +124,6 @@
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
template(jdk_internal_vm_PostVMInitHook, "jdk/internal/vm/PostVMInitHook") \
template(sun_net_www_ParseUtil, "sun/net/www/ParseUtil") \
template(jdk_internal_module_ArchivedModuleGraph, "jdk/internal/module/ArchivedModuleGraph") \
\
template(jdk_internal_loader_ClassLoaders_AppClassLoader, "jdk/internal/loader/ClassLoaders$AppClassLoader") \
template(jdk_internal_loader_ClassLoaders_PlatformClassLoader, "jdk/internal/loader/ClassLoaders$PlatformClassLoader") \
@ -649,17 +648,7 @@
JFR_TEMPLATES(template) \
\
/* cds */ \
template(configuration_signature, "Ljava/lang/module/Configuration;") \
template(java_lang_module_Configuration, "java/lang/module/Configuration") \
template(java_util_ImmutableCollections_ListN, "java/util/ImmutableCollections$ListN") \
template(java_util_ImmutableCollections_MapN, "java/util/ImmutableCollections$MapN") \
template(java_util_ImmutableCollections_SetN, "java/util/ImmutableCollections$SetN") \
template(jdk_internal_loader_ClassLoaders, "jdk/internal/loader/ClassLoaders") \
template(list_signature, "Ljava/util/List;") \
template(map_signature, "Ljava/util/Map;") \
template(moduleFinder_signature, "Ljava/lang/module/ModuleFinder;") \
template(set_signature, "Ljava/util/Set;") \
template(systemModules_signature, "Ljdk/internal/module/SystemModules;") \
template(toFileURL_name, "toFileURL") \
template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \
template(url_void_signature, "(Ljava/net/URL;)V") \

View File

@ -202,7 +202,7 @@ public:
virtual address verified_entry_point() const = 0;
virtual void log_identity(xmlStream* log) const = 0;
virtual void log_state_change() const = 0;
virtual void log_state_change(oop cause = NULL) const = 0;
virtual bool make_not_used() = 0;
virtual bool make_not_entrant() = 0;
virtual bool make_entrant() = 0;

View File

@ -422,7 +422,7 @@ void nmethod::init_defaults() {
#if INCLUDE_JVMCI
_jvmci_installed_code = NULL;
_speculation_log = NULL;
_jvmci_installed_code_triggers_unloading = false;
_jvmci_installed_code_triggers_invalidation = false;
#endif
}
@ -690,9 +690,9 @@ nmethod::nmethod(
_speculation_log = speculation_log;
oop obj = JNIHandles::resolve(installed_code);
if (obj == NULL || (obj->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(obj))) {
_jvmci_installed_code_triggers_unloading = false;
_jvmci_installed_code_triggers_invalidation = false;
} else {
_jvmci_installed_code_triggers_unloading = true;
_jvmci_installed_code_triggers_invalidation = true;
}
if (compiler->is_jvmci()) {
@ -786,6 +786,13 @@ void nmethod::log_identity(xmlStream* log) const {
if (TieredCompilation) {
log->print(" level='%d'", comp_level());
}
#if INCLUDE_JVMCI
char buffer[O_BUFLEN];
char* jvmci_name = jvmci_installed_code_name(buffer, O_BUFLEN);
if (jvmci_name != NULL) {
log->print(" jvmci_installed_code_name='%s'", jvmci_name);
}
#endif
}
@ -1083,7 +1090,7 @@ void nmethod::make_unloaded(oop cause) {
_state = unloaded;
// Log the unloading.
log_state_change();
log_state_change(cause);
#if INCLUDE_JVMCI
// The method can only be unloaded after the pointer to the installed code
@ -1107,7 +1114,7 @@ void nmethod::invalidate_osr_method() {
}
}
void nmethod::log_state_change() const {
void nmethod::log_state_change(oop cause) const {
if (LogCompilation) {
if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block
@ -1120,6 +1127,9 @@ void nmethod::log_state_change() const {
(_state == zombie ? " zombie='1'" : ""));
}
log_identity(xtty);
if (cause != NULL) {
xtty->print(" cause='%s'", cause->klass()->external_name());
}
xtty->stamp();
xtty->end_elem();
}
@ -1150,7 +1160,8 @@ bool nmethod::make_not_entrant_or_zombie(int state) {
// Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
nmethodLocker nml(this);
methodHandle the_method(method());
NoSafepointVerifier nsv;
// This can be called while the system is already at a safepoint which is ok
NoSafepointVerifier nsv(true, !SafepointSynchronize::is_at_safepoint());
// during patching, depending on the nmethod state we must notify the GC that
// code has been unloaded, unregistering it. We cannot do this right while
@ -1507,13 +1518,12 @@ bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_aliv
bool nmethod::do_unloading_jvmci() {
if (_jvmci_installed_code != NULL) {
if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
if (_jvmci_installed_code_triggers_unloading) {
// jweak reference processing has already cleared the referent
make_unloaded(NULL);
return true;
} else {
clear_jvmci_installed_code();
if (_jvmci_installed_code_triggers_invalidation) {
// The reference to the installed code has been dropped so invalidate
// this nmethod and allow the sweeper to reclaim it.
make_not_entrant();
}
clear_jvmci_installed_code();
}
}
return false;
@ -2948,7 +2958,7 @@ oop nmethod::speculation_log() {
return JNIHandles::resolve(_speculation_log);
}
char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) const {
if (!this->is_compiled_by_jvmci()) {
return NULL;
}

View File

@ -78,7 +78,7 @@ class nmethod : public CompiledMethod {
// That is, installed code other than a "default"
// HotSpotNMethod causes nmethod unloading.
// This field is ignored once _jvmci_installed_code is NULL.
bool _jvmci_installed_code_triggers_unloading;
bool _jvmci_installed_code_triggers_invalidation;
#endif
// To support simple linked-list chaining of nmethods:
@ -456,7 +456,7 @@ public:
// Copies the value of the name field in the InstalledCode
// object (if any) associated with this nmethod into buf.
// Returns the value of buf if it was updated otherwise NULL.
char* jvmci_installed_code_name(char* buf, size_t buflen);
char* jvmci_installed_code_name(char* buf, size_t buflen) const;
// Updates the state of the InstalledCode (if any) associated with
// this nmethod based on the current value of _state.
@ -486,7 +486,7 @@ public:
protected:
virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
#if INCLUDE_JVMCI
// See comment for _jvmci_installed_code_triggers_unloading field.
// See comment for _jvmci_installed_code_triggers_invalidation field.
// Returns whether this nmethod was unloaded.
virtual bool do_unloading_jvmci();
#endif
@ -555,7 +555,7 @@ public:
// Logging
void log_identity(xmlStream* log) const;
void log_new_nmethod() const;
void log_state_change() const;
void log_state_change(oop cause = NULL) const;
// Prints block-level comments, including nmethod specific block labels:
virtual void print_block_comment(outputStream* stream, address block_begin) const {

View File

@ -243,7 +243,7 @@ public:
// sets for old regions.
r->rem_set()->clear(true /* only_cardset */);
} else {
assert(!r->is_old() || !r->rem_set()->is_tracked(),
assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
"Missed to clear unused remembered set of region %u (%s) that is %s",
r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
#define SHARE_VM_GC_G1_DIRTYCARDQUEUE_HPP
#include "gc/g1/ptrQueue.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"
class FreeIdSet;

View File

@ -361,7 +361,7 @@ bool G1ArchiveAllocator::alloc_new_region() {
hr->set_closed_archive();
}
_g1h->g1_policy()->remset_tracker()->update_at_allocate(hr);
_g1h->old_set_add(hr);
_g1h->archive_set_add(hr);
_g1h->hr_printer()->alloc(hr);
_allocated_regions.append(hr);
_allocation_region = hr;

View File

@ -27,9 +27,10 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "logging/log.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"

View File

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
@ -71,6 +70,7 @@
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
@ -84,7 +84,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
@ -645,7 +644,7 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
curr_region->set_closed_archive();
}
_hr_printer.alloc(curr_region);
_old_set.add(curr_region);
_archive_set.add(curr_region);
HeapWord* top;
HeapRegion* next_region;
if (curr_region != last_region) {
@ -802,7 +801,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
guarantee(curr_region->is_archive(),
"Expected archive region at index %u", curr_region->hrm_index());
uint curr_index = curr_region->hrm_index();
_old_set.remove(curr_region);
_archive_set.remove(curr_region);
curr_region->set_free();
curr_region->set_top(curr_region->bottom());
if (curr_region != last_region) {
@ -1127,7 +1126,7 @@ bool G1CollectedHeap::do_full_collection(bool explicit_gc,
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
soft_ref_policy()->should_clear_all_soft_refs();
G1FullCollector collector(this, &_full_gc_memory_manager, explicit_gc, do_clear_all_soft_refs);
G1FullCollector collector(this, explicit_gc, do_clear_all_soft_refs);
GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true);
collector.prepare_collection();
@ -1407,6 +1406,68 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
_verifier->verify_region_sets_optional();
}
class OldRegionSetChecker : public HeapRegionSetChecker {
public:
void check_mt_safety() {
// Master Old Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master old set
// should be invoked:
// - by the VM thread (which will serialize them), or
// - by the GC workers while holding the FreeList_lock, if we're
// at a safepoint for an evacuation pause (this lock is taken
// anyway when an GC alloc region is retired so that a new one
// is allocated from the free list), or
// - by the GC workers while holding the OldSets_lock, if we're at a
// safepoint for a cleanup pause.
// (b) If we're not at a safepoint, operations on the master old set
// should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
"master old set MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
}
}
bool is_correct_type(HeapRegion* hr) { return hr->is_old(); }
const char* get_description() { return "Old Regions"; }
};
class ArchiveRegionSetChecker : public HeapRegionSetChecker {
public:
void check_mt_safety() {
guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),
"May only change archive regions during initialization or safepoint.");
}
bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }
const char* get_description() { return "Archive Regions"; }
};
class HumongousRegionSetChecker : public HeapRegionSetChecker {
public:
void check_mt_safety() {
// Humongous Set MT safety protocol:
// (a) If we're at a safepoint, operations on the master humongous
// set should be invoked by either the VM thread (which will
// serialize them) or by the GC workers while holding the
// OldSets_lock.
// (b) If we're not at a safepoint, operations on the master
// humongous set should be invoked while holding the Heap_lock.
if (SafepointSynchronize::is_at_safepoint()) {
guarantee(Thread::current()->is_VM_thread() ||
OldSets_lock->owned_by_self(),
"master humongous set MT safety protocol at a safepoint");
} else {
guarantee(Heap_lock->owned_by_self(),
"master humongous set MT safety protocol outside a safepoint");
}
}
bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
const char* get_description() { return "Humongous Regions"; }
};
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
CollectedHeap(),
_young_gen_sampling_thread(NULL),
@ -1414,13 +1475,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
_collector_policy(collector_policy),
_card_table(NULL),
_soft_ref_policy(),
_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_eden_pool(NULL),
_survivor_pool(NULL),
_old_pool(NULL),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
_old_set("Old Region Set", new OldRegionSetChecker()),
_archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
_humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
_bot(NULL),
_listener(),
_hrm(),
@ -1747,20 +1804,6 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}
void G1CollectedHeap::initialize_serviceability() {
_eden_pool = new G1EdenPool(this);
_survivor_pool = new G1SurvivorPool(this);
_old_pool = new G1OldGenPool(this);
_full_gc_memory_manager.add_pool(_eden_pool);
_full_gc_memory_manager.add_pool(_survivor_pool);
_full_gc_memory_manager.add_pool(_old_pool);
_memory_manager.add_pool(_eden_pool);
_memory_manager.add_pool(_survivor_pool);
_memory_manager.add_pool(_old_pool, false /* always_affected_by_gc */);
}
void G1CollectedHeap::stop() {
// Stop all concurrent threads. We do this to make sure these threads
// do not continue to execute and access resources (e.g. logging)
@ -2857,9 +2900,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
active_workers = workers()->update_active_workers(active_workers);
log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(&_memory_manager, gc_cause(),
collector_state()->yc_type() == Mixed /* allMemoryPoolsAffected */);
G1MonitoringScope ms(g1mm(),
false /* full_gc */,
collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);
G1HeapTransition heap_transition(this);
size_t heap_used_bytes_before_gc = used();
@ -3256,402 +3299,26 @@ void G1CollectedHeap::print_termination_stats(uint worker_id,
undo_waste * HeapWordSize / K);
}
class G1StringAndSymbolCleaningTask : public AbstractGangTask {
private:
BoolObjectClosure* _is_alive;
G1StringDedupUnlinkOrOopsDoClosure _dedup_closure;
OopStorage::ParState<false /* concurrent */, false /* const */> _par_state_string;
int _initial_string_table_size;
int _initial_symbol_table_size;
bool _process_strings;
int _strings_processed;
int _strings_removed;
bool _process_symbols;
int _symbols_processed;
int _symbols_removed;
bool _process_string_dedup;
public:
G1StringAndSymbolCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool process_string_dedup) :
AbstractGangTask("String/Symbol Unlinking"),
_is_alive(is_alive),
_dedup_closure(is_alive, NULL, false),
_par_state_string(StringTable::weak_storage()),
_process_strings(process_strings), _strings_processed(0), _strings_removed(0),
_process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0),
_process_string_dedup(process_string_dedup) {
_initial_string_table_size = (int) StringTable::the_table()->table_size();
_initial_symbol_table_size = SymbolTable::the_table()->table_size();
if (process_symbols) {
SymbolTable::clear_parallel_claimed_index();
}
if (process_strings) {
StringTable::reset_dead_counter();
}
}
~G1StringAndSymbolCleaningTask() {
guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
"claim value %d after unlink less than initial symbol table size %d",
SymbolTable::parallel_claimed_index(), _initial_symbol_table_size);
log_info(gc, stringtable)(
"Cleaned string and symbol table, "
"strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
"symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
strings_processed(), strings_removed(),
symbols_processed(), symbols_removed());
if (_process_strings) {
StringTable::finish_dead_counter();
}
}
void work(uint worker_id) {
int strings_processed = 0;
int strings_removed = 0;
int symbols_processed = 0;
int symbols_removed = 0;
if (_process_strings) {
StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed);
Atomic::add(strings_processed, &_strings_processed);
Atomic::add(strings_removed, &_strings_removed);
}
if (_process_symbols) {
SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
Atomic::add(symbols_processed, &_symbols_processed);
Atomic::add(symbols_removed, &_symbols_removed);
}
if (_process_string_dedup) {
G1StringDedup::parallel_unlink(&_dedup_closure, worker_id);
}
}
size_t strings_processed() const { return (size_t)_strings_processed; }
size_t strings_removed() const { return (size_t)_strings_removed; }
size_t symbols_processed() const { return (size_t)_symbols_processed; }
size_t symbols_removed() const { return (size_t)_symbols_removed; }
};
class G1CodeCacheUnloadingTask {
private:
static Monitor* _lock;
BoolObjectClosure* const _is_alive;
const bool _unloading_occurred;
const uint _num_workers;
// Variables used to claim nmethods.
CompiledMethod* _first_nmethod;
CompiledMethod* volatile _claimed_nmethod;
// The list of nmethods that need to be processed by the second pass.
CompiledMethod* volatile _postponed_list;
volatile uint _num_entered_barrier;
public:
G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
_is_alive(is_alive),
_unloading_occurred(unloading_occurred),
_num_workers(num_workers),
_first_nmethod(NULL),
_claimed_nmethod(NULL),
_postponed_list(NULL),
_num_entered_barrier(0)
{
CompiledMethod::increase_unloading_clock();
// Get first alive nmethod
CompiledMethodIterator iter = CompiledMethodIterator();
if(iter.next_alive()) {
_first_nmethod = iter.method();
}
_claimed_nmethod = _first_nmethod;
}
~G1CodeCacheUnloadingTask() {
CodeCache::verify_clean_inline_caches();
CodeCache::set_needs_cache_clean(false);
guarantee(CodeCache::scavenge_root_nmethods() == NULL, "Must be");
CodeCache::verify_icholder_relocations();
}
private:
void add_to_postponed_list(CompiledMethod* nm) {
CompiledMethod* old;
do {
old = _postponed_list;
nm->set_unloading_next(old);
} while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
}
void clean_nmethod(CompiledMethod* nm) {
bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
if (postponed) {
// This nmethod referred to an nmethod that has not been cleaned/unloaded yet.
add_to_postponed_list(nm);
}
// Mark that this nmethod has been cleaned/unloaded.
// After this call, it will be safe to ask if this nmethod was unloaded or not.
nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
}
void clean_nmethod_postponed(CompiledMethod* nm) {
nm->do_unloading_parallel_postponed();
}
static const int MaxClaimNmethods = 16;
void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
CompiledMethod* first;
CompiledMethodIterator last;
do {
*num_claimed_nmethods = 0;
first = _claimed_nmethod;
last = CompiledMethodIterator(first);
if (first != NULL) {
for (int i = 0; i < MaxClaimNmethods; i++) {
if (!last.next_alive()) {
break;
}
claimed_nmethods[i] = last.method();
(*num_claimed_nmethods)++;
}
}
} while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
}
CompiledMethod* claim_postponed_nmethod() {
CompiledMethod* claim;
CompiledMethod* next;
do {
claim = _postponed_list;
if (claim == NULL) {
return NULL;
}
next = claim->unloading_next();
} while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);
return claim;
}
public:
// Mark that we're done with the first pass of nmethod cleaning.
void barrier_mark(uint worker_id) {
MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
_num_entered_barrier++;
if (_num_entered_barrier == _num_workers) {
ml.notify_all();
}
}
// See if we have to wait for the other workers to
// finish their first-pass nmethod cleaning work.
void barrier_wait(uint worker_id) {
if (_num_entered_barrier < _num_workers) {
MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
while (_num_entered_barrier < _num_workers) {
ml.wait(Mutex::_no_safepoint_check_flag, 0, false);
}
}
}
// Cleaning and unloading of nmethods. Some work has to be postponed
// to the second pass, when we know which nmethods survive.
void work_first_pass(uint worker_id) {
// The first nmethods is claimed by the first worker.
if (worker_id == 0 && _first_nmethod != NULL) {
clean_nmethod(_first_nmethod);
_first_nmethod = NULL;
}
int num_claimed_nmethods;
CompiledMethod* claimed_nmethods[MaxClaimNmethods];
while (true) {
claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
if (num_claimed_nmethods == 0) {
break;
}
for (int i = 0; i < num_claimed_nmethods; i++) {
clean_nmethod(claimed_nmethods[i]);
}
}
}
void work_second_pass(uint worker_id) {
CompiledMethod* nm;
// Take care of postponed nmethods.
while ((nm = claim_postponed_nmethod()) != NULL) {
clean_nmethod_postponed(nm);
}
}
};
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
class G1KlassCleaningTask : public StackObj {
volatile int _clean_klass_tree_claimed;
ClassLoaderDataGraphKlassIteratorAtomic _klass_iterator;
public:
G1KlassCleaningTask() :
_clean_klass_tree_claimed(0),
_klass_iterator() {
}
private:
bool claim_clean_klass_tree_task() {
if (_clean_klass_tree_claimed) {
return false;
}
return Atomic::cmpxchg(1, &_clean_klass_tree_claimed, 0) == 0;
}
InstanceKlass* claim_next_klass() {
Klass* klass;
do {
klass =_klass_iterator.next_klass();
} while (klass != NULL && !klass->is_instance_klass());
// this can be null so don't call InstanceKlass::cast
return static_cast<InstanceKlass*>(klass);
}
public:
void clean_klass(InstanceKlass* ik) {
ik->clean_weak_instanceklass_links();
}
void work() {
ResourceMark rm;
// One worker will clean the subklass/sibling klass tree.
if (claim_clean_klass_tree_task()) {
Klass::clean_subklass_tree();
}
// All workers will help cleaning the classes,
InstanceKlass* klass;
while ((klass = claim_next_klass()) != NULL) {
clean_klass(klass);
}
}
};
class G1ResolvedMethodCleaningTask : public StackObj {
volatile int _resolved_method_task_claimed;
public:
G1ResolvedMethodCleaningTask() :
_resolved_method_task_claimed(0) {}
bool claim_resolved_method_task() {
if (_resolved_method_task_claimed) {
return false;
}
return Atomic::cmpxchg(1, &_resolved_method_task_claimed, 0) == 0;
}
// These aren't big, one thread can do it all.
void work() {
if (claim_resolved_method_task()) {
ResolvedMethodTable::unlink();
}
}
};
// To minimize the remark pause times, the tasks below are done in parallel.
class G1ParallelCleaningTask : public AbstractGangTask {
private:
bool _unloading_occurred;
G1StringAndSymbolCleaningTask _string_symbol_task;
G1CodeCacheUnloadingTask _code_cache_task;
G1KlassCleaningTask _klass_cleaning_task;
G1ResolvedMethodCleaningTask _resolved_method_cleaning_task;
public:
// The constructor is run in the VMThread.
G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) :
AbstractGangTask("Parallel Cleaning"),
_unloading_occurred(unloading_occurred),
_string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()),
_code_cache_task(num_workers, is_alive, unloading_occurred),
_klass_cleaning_task(),
_resolved_method_cleaning_task() {
}
// The parallel work done by all worker threads.
void work(uint worker_id) {
// Do first pass of code cache cleaning.
_code_cache_task.work_first_pass(worker_id);
// Let the threads mark that the first pass is done.
_code_cache_task.barrier_mark(worker_id);
// Clean the Strings and Symbols.
_string_symbol_task.work(worker_id);
// Clean unreferenced things in the ResolvedMethodTable
_resolved_method_cleaning_task.work();
// Wait for all workers to finish the first code cache cleaning pass.
_code_cache_task.barrier_wait(worker_id);
// Do the second code cache cleaning work, which realize on
// the liveness information gathered during the first pass.
_code_cache_task.work_second_pass(worker_id);
// Clean all klasses that were not unloaded.
// The weak metadata in klass doesn't need to be
// processed if there was no unloading.
if (_unloading_occurred) {
_klass_cleaning_task.work();
}
}
};
void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
bool class_unloading_occurred) {
uint n_workers = workers()->active_workers();
G1ParallelCleaningTask g1_unlink_task(is_alive, n_workers, class_unloading_occurred);
G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false);
ParallelCleaningTask g1_unlink_task(is_alive, &dedup_closure, n_workers, class_unloading_occurred);
workers()->run_task(&g1_unlink_task);
}
void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive,
bool process_strings,
bool process_symbols,
bool process_string_dedup) {
if (!process_strings && !process_symbols && !process_string_dedup) {
if (!process_strings && !process_string_dedup) {
// Nothing to clean.
return;
}
G1StringAndSymbolCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, process_string_dedup);
G1StringDedupUnlinkOrOopsDoClosure dedup_closure(is_alive, NULL, false);
StringCleaningTask g1_unlink_task(is_alive, process_string_dedup ? &dedup_closure : NULL, process_strings);
workers()->run_task(&g1_unlink_task);
}
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
@ -4045,7 +3712,7 @@ void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_in
process_discovered_references(per_thread_states);
// FIXME
// CM's reference processing also cleans up the string and symbol tables.
// CM's reference processing also cleans up the string table.
// Should we do that here also? We could, but it is a serial operation
// and could significantly increase the pause time.
@ -4650,7 +4317,6 @@ bool G1CollectedHeap::check_young_list_empty() {
#endif // ASSERT
class TearDownRegionSetsClosure : public HeapRegionClosure {
private:
HeapRegionSet *_old_set;
public:
@ -4663,9 +4329,9 @@ public:
r->uninstall_surv_rate_group();
} else {
// We ignore free regions, we'll empty the free list afterwards.
// We ignore humongous regions, we're not tearing down the
// humongous regions set.
assert(r->is_free() || r->is_humongous(),
// We ignore humongous and archive regions, we're not tearing down these
// sets.
assert(r->is_archive() || r->is_free() || r->is_humongous(),
"it cannot be another type");
}
return false;
@ -4708,14 +4374,17 @@ void G1CollectedHeap::set_used(size_t bytes) {
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
bool _free_list_only;
HeapRegionSet* _old_set;
HeapRegionManager* _hrm;
size_t _total_used;
bool _free_list_only;
HeapRegionSet* _old_set;
HeapRegionManager* _hrm;
size_t _total_used;
public:
RebuildRegionSetsClosure(bool free_list_only,
HeapRegionSet* old_set, HeapRegionManager* hrm) :
HeapRegionSet* old_set,
HeapRegionManager* hrm) :
_free_list_only(free_list_only),
_old_set(old_set), _hrm(hrm), _total_used(0) {
assert(_hrm->num_free_regions() == 0, "pre-condition");
@ -4733,11 +4402,11 @@ public:
_hrm->insert_into_free_list(r);
} else if (!_free_list_only) {
if (r->is_humongous()) {
// We ignore humongous regions. We left the humongous set unchanged.
if (r->is_archive() || r->is_humongous()) {
// We ignore archive and humongous regions. We left these sets unchanged.
} else {
assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
// We now move all (non-humongous, non-old) regions to old gen, and register them as such.
// We now move all (non-humongous, non-old, non-archive) regions to old gen, and register them as such.
r->move_to_old();
_old_set->add(r);
}
@ -4811,7 +4480,7 @@ void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
_hr_printer.retire(alloc_region);
// We update the eden sizes here, when the region is retired,
// instead of when it's allocated, since this is the point that its
// used space has been recored in _summary_bytes_used.
// used space has been recorded in _summary_bytes_used.
g1mm()->update_eden_size();
}
@ -4862,7 +4531,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (dest.is_old()) {
_old_set.add(alloc_region);
old_set_add(alloc_region);
}
_hr_printer.retire(alloc_region);
}
@ -4987,17 +4656,14 @@ void G1CollectedHeap::rebuild_strong_code_roots() {
CodeCache::blobs_do(&blob_cl);
}
void G1CollectedHeap::initialize_serviceability() {
_g1mm->initialize_serviceability();
}
GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
GrowableArray<GCMemoryManager*> memory_managers(2);
memory_managers.append(&_memory_manager);
memory_managers.append(&_full_gc_memory_manager);
return memory_managers;
return _g1mm->memory_managers();
}
GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
GrowableArray<MemoryPool*> memory_pools(3);
memory_pools.append(_eden_pool);
memory_pools.append(_survivor_pool);
memory_pools.append(_old_pool);
return memory_pools;
return _g1mm->memory_pools();
}

View File

@ -51,7 +51,6 @@
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "memory/memRegion.hpp"
#include "services/memoryManager.hpp"
#include "utilities/stack.hpp"
// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
@ -67,6 +66,7 @@ class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1ParScanThreadState;
class MemoryPool;
class MemoryManager;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
@ -160,23 +160,13 @@ private:
SoftRefPolicy _soft_ref_policy;
GCMemoryManager _memory_manager;
GCMemoryManager _full_gc_memory_manager;
MemoryPool* _eden_pool;
MemoryPool* _survivor_pool;
MemoryPool* _old_pool;
static size_t _humongous_object_threshold_in_words;
// It keeps track of the old regions.
// These sets keep track of old, archive and humongous regions respectively.
HeapRegionSet _old_set;
// It keeps track of the humongous regions.
HeapRegionSet _archive_set;
HeapRegionSet _humongous_set;
virtual void initialize_serviceability();
void eagerly_reclaim_humongous_regions();
// Start a new incremental collection set for the next pause.
void start_new_collection_set();
@ -970,6 +960,7 @@ public:
virtual SoftRefPolicy* soft_ref_policy();
virtual void initialize_serviceability();
virtual GrowableArray<GCMemoryManager*> memory_managers();
virtual GrowableArray<MemoryPool*> memory_pools();
@ -1046,8 +1037,10 @@ public:
inline void old_set_add(HeapRegion* hr);
inline void old_set_remove(HeapRegion* hr);
inline void archive_set_add(HeapRegion* hr);
size_t non_young_capacity_bytes() {
return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
}
// Determine whether the given region is one that we are using as an
@ -1232,20 +1225,11 @@ public:
const G1SurvivorRegions* survivor() const { return &_survivor; }
uint survivor_regions_count() const {
return _survivor.length();
}
uint eden_regions_count() const {
return _eden.length();
}
uint young_regions_count() const {
return _eden.length() + _survivor.length();
}
uint eden_regions_count() const { return _eden.length(); }
uint survivor_regions_count() const { return _survivor.length(); }
uint young_regions_count() const { return _eden.length() + _survivor.length(); }
uint old_regions_count() const { return _old_set.length(); }
uint archive_regions_count() const { return _archive_set.length(); }
uint humongous_regions_count() const { return _humongous_set.length(); }
#ifdef ASSERT
@ -1324,9 +1308,8 @@ public:
// Partial cleaning used when class unloading is disabled.
// Let the caller choose what structures to clean out:
// - StringTable
// - SymbolTable
// - StringDeduplication structures
void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_symbols, bool unlink_string_dedup);
void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_string_dedup);
// Complete cleaning used when class unloading is enabled.
// Cleans out all structures handled by partial_cleaning and also the CodeCache.

View File

@ -92,6 +92,10 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
_archive_set.add(hr);
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block

View File

@ -328,10 +328,10 @@ bool G1CollectionSet::verify_young_ages() {
return cl.valid();
}
class G1PrintCollectionSetClosure : public HeapRegionClosure {
class G1PrintCollectionSetDetailClosure : public HeapRegionClosure {
outputStream* _st;
public:
G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
G1PrintCollectionSetDetailClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
virtual bool do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
@ -347,7 +347,7 @@ public:
void G1CollectionSet::print(outputStream* st) {
st->print_cr("\nCollection_set:");
G1PrintCollectionSetClosure cl(st);
G1PrintCollectionSetDetailClosure cl(st);
iterate(&cl);
}
#endif // !PRODUCT

View File

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
@ -1578,8 +1577,8 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
// Is alive closure.
G1CMIsAliveClosure g1_is_alive(_g1h);
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
// Inner scope to exclude the cleaning of the string table
// from the displayed time.
{
GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm);
@ -1673,16 +1672,16 @@ void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) {
WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl);
}
// Unload Klasses, String, Symbols, Code Cache, etc.
// Unload Klasses, String, Code Cache, etc.
if (ClassUnloadingWithConcurrentMark) {
GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm);
bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */);
_g1h->complete_cleaning(&g1_is_alive, purged_classes);
} else {
GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm);
// No need to clean string table and symbol table as they are treated as strong roots when
// No need to clean string table as it is treated as strong roots when
// class unloading is disabled.
_g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled());
_g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled());
}
}

View File

@ -52,7 +52,7 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
return false;
}
assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
return _g1h->heap_region_containing(obj)->is_old_or_humongous();
return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
}
inline bool G1ConcurrentMark::mark_in_next_bitmap(uint const worker_id, oop const obj, size_t const obj_size) {

View File

@ -103,9 +103,9 @@ uint G1FullCollector::calc_active_workers() {
return worker_count;
}
G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs) :
G1FullCollector::G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs) :
_heap(heap),
_scope(memory_manager, explicit_gc, clear_soft_refs),
_scope(heap->g1mm(), explicit_gc, clear_soft_refs),
_num_workers(calc_active_workers()),
_oop_queue_set(_num_workers),
_array_queue_set(_num_workers),
@ -226,8 +226,8 @@ void G1FullCollector::phase1_mark_live_objects() {
_heap->complete_cleaning(&_is_alive, purged_class);
} else {
GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer());
// If no class unloading just clean out strings and symbols.
_heap->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled());
// If no class unloading just clean out strings.
_heap->partial_cleaning(&_is_alive, true, G1StringDedup::is_enabled());
}
scope()->tracer()->report_object_count_after_gc(&_is_alive);

View File

@ -72,7 +72,7 @@ class G1FullCollector : StackObj {
ReferenceProcessorSubjectToDiscoveryMutator _is_subject_mutator;
public:
G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft_refs);
G1FullCollector(G1CollectedHeap* heap, bool explicit_gc, bool clear_soft_refs);
~G1FullCollector();
void prepare_collection();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1FullGCScope.hpp"
G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft) :
G1FullGCScope::G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft) :
_rm(),
_explicit_gc(explicit_gc),
_g1h(G1CollectedHeap::heap()),
@ -36,8 +36,7 @@ G1FullGCScope::G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc,
_active(),
_cpu_time(),
_soft_refs(clear_soft, _g1h->soft_ref_policy()),
_collector_stats(_g1h->g1mm()->full_collection_counters()),
_memory_stats(memory_manager, _g1h->gc_cause()),
_monitoring_scope(monitoring_support, true /* full_gc */, true /* all_memory_pools_affected */),
_heap_transition(_g1h) {
_timer.register_gc_start();
_tracer.report_gc_start(_g1h->gc_cause(), _timer.gc_start());

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,6 @@
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
@ -51,12 +50,11 @@ class G1FullGCScope : public StackObj {
IsGCActiveMark _active;
GCTraceCPUTime _cpu_time;
ClearedAllSoftRefs _soft_refs;
TraceCollectorStats _collector_stats;
TraceMemoryManagerStats _memory_stats;
G1MonitoringScope _monitoring_scope;
G1HeapTransition _heap_transition;
public:
G1FullGCScope(GCMemoryManager* memory_manager, bool explicit_gc, bool clear_soft);
G1FullGCScope(G1MonitoringSupport* monitoring_support, bool explicit_gc, bool clear_soft);
~G1FullGCScope();
bool is_explicit_gc();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,6 @@ class G1HeapRegionTraceType : AllStatic {
StartsHumongous,
ContinuesHumongous,
Old,
Pinned,
OpenArchive,
ClosedArchive,
G1HeapRegionTypeEndSentinel
@ -51,7 +50,6 @@ class G1HeapRegionTraceType : AllStatic {
case StartsHumongous: return "Starts Humongous";
case ContinuesHumongous: return "Continues Humongous";
case Old: return "Old";
case Pinned: return "Pinned";
case OpenArchive: return "OpenArchive";
case ClosedArchive: return "ClosedArchive";
default: ShouldNotReachHere(); return NULL;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@ G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) {
_eden_length = g1_heap->eden_regions_count();
_survivor_length = g1_heap->survivor_regions_count();
_old_length = g1_heap->old_regions_count();
_archive_length = g1_heap->archive_regions_count();
_humongous_length = g1_heap->humongous_regions_count();
_metaspace_used_bytes = MetaspaceUtils::used_bytes();
}
@ -43,16 +44,19 @@ struct DetailedUsage : public StackObj {
size_t _eden_used;
size_t _survivor_used;
size_t _old_used;
size_t _archive_used;
size_t _humongous_used;
size_t _eden_region_count;
size_t _survivor_region_count;
size_t _old_region_count;
size_t _archive_region_count;
size_t _humongous_region_count;
DetailedUsage() :
_eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0),
_eden_region_count(0), _survivor_region_count(0), _old_region_count(0), _humongous_region_count(0) {}
_eden_used(0), _survivor_used(0), _old_used(0), _archive_used(0), _humongous_used(0),
_eden_region_count(0), _survivor_region_count(0), _old_region_count(0),
_archive_region_count(0), _humongous_region_count(0) {}
};
class DetailedUsageClosure: public HeapRegionClosure {
@ -62,6 +66,9 @@ public:
if (r->is_old()) {
_usage._old_used += r->used();
_usage._old_region_count++;
} else if (r->is_archive()) {
_usage._archive_used += r->used();
_usage._archive_region_count++;
} else if (r->is_survivor()) {
_usage._survivor_used += r->used();
_usage._survivor_region_count++;
@ -94,6 +101,8 @@ void G1HeapTransition::print() {
after._survivor_length, usage._survivor_region_count);
assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._old_length, usage._old_region_count);
assert(usage._archive_region_count == after._archive_length, "Expected archive to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._archive_length, usage._archive_region_count);
assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._humongous_length, usage._humongous_region_count);
}
@ -112,6 +121,11 @@ void G1HeapTransition::print() {
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);
log_info(gc, heap)("Archive regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._archive_length, after._archive_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._archive_used / K, ((after._archive_length * HeapRegion::GrainBytes) - usage._archive_used) / K);
log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._humongous_length, after._humongous_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,6 +34,7 @@ class G1HeapTransition {
size_t _eden_length;
size_t _survivor_length;
size_t _old_length;
size_t _archive_length;
size_t _humongous_length;
size_t _metaspace_used_bytes;

View File

@ -488,19 +488,22 @@ void G1HeapVerifier::verify(VerifyOption vo) {
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
HeapRegionSet* _old_set;
HeapRegionSet* _archive_set;
HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;
HeapRegionManager* _hrm;
public:
uint _old_count;
uint _archive_count;
uint _humongous_count;
uint _free_count;
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* archive_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
_old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _archive_count(), _humongous_count(), _free_count(){ }
bool do_heap_region(HeapRegion* hr) {
if (hr->is_young()) {
@ -511,6 +514,9 @@ public:
} else if (hr->is_empty()) {
assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
_free_count++;
} else if (hr->is_archive()) {
assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index());
_archive_count++;
} else if (hr->is_old()) {
assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
_old_count++;
@ -523,8 +529,9 @@ public:
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count);
guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
}
@ -539,9 +546,9 @@ void G1HeapVerifier::verify_region_sets() {
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
_g1h->heap_region_iterate(&cl);
cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
}
void G1HeapVerifier::prepare_for_verify() {
@ -755,6 +762,11 @@ class G1CheckCSetFastTableClosure : public HeapRegionClosure {
return true;
}
if (cset_state.is_in_cset()) {
if (hr->is_archive()) {
log_error(gc, verify)("## is_archive in collection set for region %u", i);
_failures = true;
return true;
}
if (hr->is_young() != (cset_state.is_young())) {
log_error(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
hr->is_young(), cset_state.value(), i);

View File

@ -40,50 +40,41 @@ G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
assert(UseG1GC, "sanity");
}
G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
G1EdenPool::G1EdenPool(G1CollectedHeap* g1h, size_t initial_size) :
G1MemoryPoolSuper(g1h,
"G1 Eden Space",
g1h->g1mm()->eden_space_committed(), /* init_size */
_undefined_max,
initial_size,
MemoryUsage::undefined_size(),
false /* support_usage_threshold */) { }
MemoryUsage G1EdenPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = _g1mm->eden_space_committed();
return MemoryUsage(initial_sz, used, committed, max_sz);
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size) :
G1MemoryPoolSuper(g1h,
"G1 Survivor Space",
g1h->g1mm()->survivor_space_committed(), /* init_size */
_undefined_max,
initial_size,
MemoryUsage::undefined_size(),
false /* support_usage_threshold */) { }
MemoryUsage G1SurvivorPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = _g1mm->survivor_space_committed();
return MemoryUsage(initial_sz, used, committed, max_sz);
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}
G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size) :
G1MemoryPoolSuper(g1h,
"G1 Old Gen",
g1h->g1mm()->old_space_committed(), /* init_size */
g1h->g1mm()->old_gen_max(),
initial_size,
max_size,
true /* support_usage_threshold */) { }
MemoryUsage G1OldGenPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = _g1mm->old_space_committed();
size_t committed = _g1mm->old_gen_committed();
return MemoryUsage(initial_sz, used, committed, max_sz);
return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
}

View File

@ -53,7 +53,6 @@ class G1CollectedHeap;
// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
class G1MemoryPoolSuper : public CollectedMemoryPool {
protected:
const static size_t _undefined_max = (size_t) -1;
G1MonitoringSupport* _g1mm;
// Would only be called from subclasses.
@ -67,42 +66,30 @@ protected:
// Memory pool that represents the G1 eden.
class G1EdenPool : public G1MemoryPoolSuper {
public:
G1EdenPool(G1CollectedHeap* g1h);
G1EdenPool(G1CollectedHeap* g1h, size_t initial_size);
size_t used_in_bytes() { return _g1mm->eden_space_used(); }
size_t used_in_bytes() {
return _g1mm->eden_space_used();
}
size_t max_size() const {
return _undefined_max;
}
MemoryUsage get_memory_usage();
};
// Memory pool that represents the G1 survivor.
class G1SurvivorPool : public G1MemoryPoolSuper {
public:
G1SurvivorPool(G1CollectedHeap* g1h);
G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size);
size_t used_in_bytes() { return _g1mm->survivor_space_used(); }
size_t used_in_bytes() {
return _g1mm->survivor_space_used();
}
size_t max_size() const {
return _undefined_max;
}
MemoryUsage get_memory_usage();
};
// Memory pool that represents the G1 old gen.
class G1OldGenPool : public G1MemoryPoolSuper {
public:
G1OldGenPool(G1CollectedHeap* g1h);
G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size);
size_t used_in_bytes() { return _g1mm->old_gen_used(); }
size_t used_in_bytes() {
return _g1mm->old_space_used();
}
size_t max_size() const {
return _g1mm->old_gen_max();
}
MemoryUsage get_memory_usage();
};

View File

@ -26,83 +26,95 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/g1/g1MemoryPool.hpp"
#include "gc/shared/hSpaceCounters.hpp"
#include "memory/metaspaceCounters.hpp"
#include "services/memoryPool.hpp"
G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name,
int ordinal, int spaces,
size_t min_capacity,
size_t max_capacity,
size_t curr_capacity)
class G1GenerationCounters : public GenerationCounters {
protected:
G1MonitoringSupport* _g1mm;
public:
G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
size_t curr_capacity)
: GenerationCounters(name, ordinal, spaces, min_capacity,
max_capacity, curr_capacity), _g1mm(g1mm) { }
};
// We pad the capacity three times given that the young generation
// contains three spaces (eden and two survivors).
G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
const char* name)
class G1YoungGenerationCounters : public G1GenerationCounters {
public:
// We pad the capacity three times given that the young generation
// contains three spaces (eden and two survivors).
G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size)
: G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
if (UsePerfData) {
update_all();
G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
G1MonitoringSupport::pad_capacity(max_size, 3),
G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
if (UsePerfData) {
update_all();
}
}
}
G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
const char* name)
virtual void update_all() {
size_t committed =
G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
_current_size->set_value(committed);
}
};
class G1OldGenerationCounters : public G1GenerationCounters {
public:
G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name, size_t max_size)
: G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
if (UsePerfData) {
update_all();
G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
G1MonitoringSupport::pad_capacity(max_size),
G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
if (UsePerfData) {
update_all();
}
}
}
void G1YoungGenerationCounters::update_all() {
size_t committed =
G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
_current_size->set_value(committed);
}
void G1OldGenerationCounters::update_all() {
size_t committed =
G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
_current_size->set_value(committed);
}
virtual void update_all() {
size_t committed =
G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
_current_size->set_value(committed);
}
};
G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
_g1h(g1h),
_incremental_memory_manager("G1 Young Generation", "end of minor GC"),
_full_gc_memory_manager("G1 Old Generation", "end of major GC"),
_eden_space_pool(NULL),
_survivor_space_pool(NULL),
_old_gen_pool(NULL),
_incremental_collection_counters(NULL),
_full_collection_counters(NULL),
_conc_collection_counters(NULL),
_young_collection_counters(NULL),
_old_collection_counters(NULL),
_young_gen_counters(NULL),
_old_gen_counters(NULL),
_old_space_counters(NULL),
_eden_counters(NULL),
_from_counters(NULL),
_to_counters(NULL),
_eden_space_counters(NULL),
_from_space_counters(NULL),
_to_space_counters(NULL),
_overall_reserved(0),
_overall_committed(0),
_overall_used(0),
_young_region_num(0),
_young_gen_committed(0),
_eden_committed(0),
_eden_used(0),
_survivor_committed(0),
_survivor_used(0),
_old_committed(0),
_old_used(0) {
_old_gen_committed(0),
_eden_space_committed(0),
_eden_space_used(0),
_survivor_space_committed(0),
_survivor_space_used(0),
_old_gen_used(0) {
_overall_reserved = g1h->max_capacity();
recalculate_sizes();
// Counters for GC collections
// Counters for garbage collections
//
// name "collector.0". In a generational collector this would be the
// young generation collection.
@ -117,77 +129,96 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
_conc_collection_counters =
new CollectorCounters("G1 stop-the-world phases", 2);
// timer sampling for all counters supporting sampling only update the
// used value. See the take_sample() method. G1 requires both used and
// capacity updated so sampling is not currently used. It might
// be sufficient to update all counters in take_sample() even though
// take_sample() only returns "used". When sampling was used, there
// were some anomolous values emitted which may have been the consequence
// of not updating all values simultaneously (i.e., see the calculation done
// in eden_space_used(), is it possible that the values used to
// calculate either eden_used or survivor_used are being updated by
// the collector when the sample is being done?).
const bool sampled = false;
// "Generation" and "Space" counters.
//
// name "generation.1" This is logically the old generation in
// generational GC terms. The "1, 1" parameters are for
// the n-th generation (=1) with 1 space.
// Counters are created from minCapacity, maxCapacity, and capacity
_old_collection_counters = new G1OldGenerationCounters(this, "old");
_old_gen_counters = new G1OldGenerationCounters(this, "old", _g1h->max_capacity());
// name "generation.1.space.0"
// Counters are created from maxCapacity, capacity, initCapacity,
// and used.
_old_space_counters = new HSpaceCounters(_old_collection_counters->name_space(),
_old_space_counters = new HSpaceCounters(_old_gen_counters->name_space(),
"space", 0 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
pad_capacity(old_space_committed()) /* init_capacity */);
pad_capacity(g1h->max_capacity()) /* max_capacity */,
pad_capacity(_old_gen_committed) /* init_capacity */);
// Young collection set
// name "generation.0". This is logically the young generation.
// The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
// See _old_collection_counters for additional counters
_young_collection_counters = new G1YoungGenerationCounters(this, "young");
_young_gen_counters = new G1YoungGenerationCounters(this, "young", _g1h->max_capacity());
const char* young_collection_name_space = _young_collection_counters->name_space();
const char* young_collection_name_space = _young_gen_counters->name_space();
// name "generation.0.space.0"
// See _old_space_counters for additional counters
_eden_counters = new HSpaceCounters(young_collection_name_space,
_eden_space_counters = new HSpaceCounters(young_collection_name_space,
"eden", 0 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
pad_capacity(eden_space_committed()) /* init_capacity */);
pad_capacity(g1h->max_capacity()) /* max_capacity */,
pad_capacity(_eden_space_committed) /* init_capacity */);
// name "generation.0.space.1"
// See _old_space_counters for additional counters
// Set the arguments to indicate that this survivor space is not used.
_from_counters = new HSpaceCounters(young_collection_name_space,
_from_space_counters = new HSpaceCounters(young_collection_name_space,
"s0", 1 /* ordinal */,
pad_capacity(0) /* max_capacity */,
pad_capacity(0) /* init_capacity */);
// Given that this survivor space is not used, we update it here
// once to reflect that its used space is 0 so that we don't have to
// worry about updating it again later.
_from_space_counters->update_used(0);
// name "generation.0.space.2"
// See _old_space_counters for additional counters
_to_counters = new HSpaceCounters(young_collection_name_space,
_to_space_counters = new HSpaceCounters(young_collection_name_space,
"s1", 2 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
pad_capacity(survivor_space_committed()) /* init_capacity */);
pad_capacity(g1h->max_capacity()) /* max_capacity */,
pad_capacity(_survivor_space_committed) /* init_capacity */);
}
if (UsePerfData) {
// Given that this survivor space is not used, we update it here
// once to reflect that its used space is 0 so that we don't have to
// worry about updating it again later.
_from_counters->update_used(0);
}
G1MonitoringSupport::~G1MonitoringSupport() {
delete _eden_space_pool;
delete _survivor_space_pool;
delete _old_gen_pool;
}
void G1MonitoringSupport::initialize_serviceability() {
_eden_space_pool = new G1EdenPool(_g1h, _eden_space_committed);
_survivor_space_pool = new G1SurvivorPool(_g1h, _survivor_space_committed);
_old_gen_pool = new G1OldGenPool(_g1h, _old_gen_committed, _g1h->max_capacity());
_full_gc_memory_manager.add_pool(_eden_space_pool);
_full_gc_memory_manager.add_pool(_survivor_space_pool);
_full_gc_memory_manager.add_pool(_old_gen_pool);
_incremental_memory_manager.add_pool(_eden_space_pool);
_incremental_memory_manager.add_pool(_survivor_space_pool);
_incremental_memory_manager.add_pool(_old_gen_pool, false /* always_affected_by_gc */);
}
GrowableArray<GCMemoryManager*> G1MonitoringSupport::memory_managers() {
GrowableArray<GCMemoryManager*> memory_managers(2);
memory_managers.append(&_incremental_memory_manager);
memory_managers.append(&_full_gc_memory_manager);
return memory_managers;
}
GrowableArray<MemoryPool*> G1MonitoringSupport::memory_pools() {
GrowableArray<MemoryPool*> memory_pools(3);
memory_pools.append(_eden_space_pool);
memory_pools.append(_survivor_space_pool);
memory_pools.append(_old_gen_pool);
return memory_pools;
}
void G1MonitoringSupport::recalculate_sizes() {
// Recalculate all the sizes from scratch. We assume that this is
// called at a point where no concurrent updates to the various
// values we read here are possible (i.e., at a STW phase at the end
// of a GC).
assert_heap_locked_or_at_safepoint(true);
// Recalculate all the sizes from scratch.
uint young_list_length = _g1h->young_regions_count();
uint survivor_list_length = _g1h->survivor_regions_count();
@ -200,14 +231,13 @@ void G1MonitoringSupport::recalculate_sizes() {
uint eden_list_max_length = young_list_max_length - survivor_list_length;
_overall_used = _g1h->used_unlocked();
_eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
_survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
_young_region_num = young_list_length;
_old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
_eden_space_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
_survivor_space_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
_old_gen_used = subtract_up_to_zero(_overall_used, _eden_space_used + _survivor_space_used);
// First calculate the committed sizes that can be calculated independently.
_survivor_committed = _survivor_used;
_old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
_survivor_space_committed = _survivor_space_used;
_old_gen_committed = HeapRegion::align_up_to_region_byte_size(_old_gen_used);
// Next, start with the overall committed size.
_overall_committed = _g1h->capacity();
@ -215,70 +245,64 @@ void G1MonitoringSupport::recalculate_sizes() {
// Remove the committed size we have calculated so far (for the
// survivor and old space).
assert(committed >= (_survivor_committed + _old_committed), "sanity");
committed -= _survivor_committed + _old_committed;
assert(committed >= (_survivor_space_committed + _old_gen_committed), "sanity");
committed -= _survivor_space_committed + _old_gen_committed;
// Next, calculate and remove the committed size for the eden.
_eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
_eden_space_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
// Somewhat defensive: be robust in case there are inaccuracies in
// the calculations
_eden_committed = MIN2(_eden_committed, committed);
committed -= _eden_committed;
_eden_space_committed = MIN2(_eden_space_committed, committed);
committed -= _eden_space_committed;
// Finally, give the rest to the old space...
_old_committed += committed;
_old_gen_committed += committed;
// ..and calculate the young gen committed.
_young_gen_committed = _eden_committed + _survivor_committed;
_young_gen_committed = _eden_space_committed + _survivor_space_committed;
assert(_overall_committed ==
(_eden_committed + _survivor_committed + _old_committed),
(_eden_space_committed + _survivor_space_committed + _old_gen_committed),
"the committed sizes should add up");
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed);
_eden_space_used = MIN2(_eden_space_used, _eden_space_committed);
// _survivor_committed and _old_committed are calculated in terms of
// the corresponding _*_used value, so the next two conditions
// should hold.
assert(_survivor_used <= _survivor_committed, "post-condition");
assert(_old_used <= _old_committed, "post-condition");
}
void G1MonitoringSupport::recalculate_eden_size() {
// When a new eden region is allocated, only the eden_used size is
// affected (since we have recalculated everything else at the last GC).
uint young_region_num = _g1h->young_regions_count();
if (young_region_num > _young_region_num) {
uint diff = young_region_num - _young_region_num;
_eden_used += (size_t) diff * HeapRegion::GrainBytes;
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed);
_young_region_num = young_region_num;
}
assert(_survivor_space_used <= _survivor_space_committed, "post-condition");
assert(_old_gen_used <= _old_gen_committed, "post-condition");
}
void G1MonitoringSupport::update_sizes() {
recalculate_sizes();
if (UsePerfData) {
eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
eden_counters()->update_used(eden_space_used());
// only the to survivor space (s1) is active, so we don't need to
// update the counters for the from survivor space (s0)
to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
to_counters()->update_used(survivor_space_used());
old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
old_space_counters()->update_used(old_space_used());
old_collection_counters()->update_all();
young_collection_counters()->update_all();
_eden_space_counters->update_capacity(pad_capacity(_eden_space_committed));
_eden_space_counters->update_used(_eden_space_used);
// only the "to" survivor space is active, so we don't need to
// update the counters for the "from" survivor space
_to_space_counters->update_capacity(pad_capacity(_survivor_space_committed));
_to_space_counters->update_used(_survivor_space_used);
_old_space_counters->update_capacity(pad_capacity(_old_gen_committed));
_old_space_counters->update_used(_old_gen_used);
_young_gen_counters->update_all();
_old_gen_counters->update_all();
MetaspaceCounters::update_performance_counters();
CompressedClassSpaceCounters::update_performance_counters();
}
}
void G1MonitoringSupport::update_eden_size() {
recalculate_eden_size();
// Recalculate everything - this is fast enough.
recalculate_sizes();
if (UsePerfData) {
eden_counters()->update_used(eden_space_used());
_eden_space_counters->update_used(_eden_space_used);
}
}
G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected) :
_tcs(full_gc ? g1mm->_full_collection_counters : g1mm->_incremental_collection_counters),
_tms(full_gc ? &g1mm->_full_gc_memory_manager : &g1mm->_incremental_memory_manager,
G1CollectedHeap::heap()->gc_cause(), all_memory_pools_affected) {
}

View File

@ -25,11 +25,15 @@
#ifndef SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
#define SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/generationCounters.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
class CollectorCounters;
class G1CollectedHeap;
class HSpaceCounters;
class MemoryPool;
// Class for monitoring logical spaces in G1. It provides data for
// both G1's jstat counters as well as G1's memory pools.
@ -116,9 +120,18 @@ class HSpaceCounters;
class G1MonitoringSupport : public CHeapObj<mtGC> {
friend class VMStructs;
friend class G1MonitoringScope;
G1CollectedHeap* _g1h;
// java.lang.management MemoryManager and MemoryPool support
GCMemoryManager _incremental_memory_manager;
GCMemoryManager _full_gc_memory_manager;
MemoryPool* _eden_space_pool;
MemoryPool* _survivor_space_pool;
MemoryPool* _old_gen_pool;
// jstat performance counters
// incremental collections both young and mixed
CollectorCounters* _incremental_collection_counters;
@ -129,37 +142,36 @@ class G1MonitoringSupport : public CHeapObj<mtGC> {
// young collection set counters. The _eden_counters,
// _from_counters, and _to_counters are associated with
// this "generational" counter.
GenerationCounters* _young_collection_counters;
GenerationCounters* _young_gen_counters;
// old collection set counters. The _old_space_counters
// below are associated with this "generational" counter.
GenerationCounters* _old_collection_counters;
GenerationCounters* _old_gen_counters;
// Counters for the capacity and used for
// the whole heap
HSpaceCounters* _old_space_counters;
// the young collection
HSpaceCounters* _eden_counters;
HSpaceCounters* _eden_space_counters;
// the survivor collection (only one, _to_counters, is actively used)
HSpaceCounters* _from_counters;
HSpaceCounters* _to_counters;
HSpaceCounters* _from_space_counters;
HSpaceCounters* _to_space_counters;
// When it's appropriate to recalculate the various sizes (at the
// end of a GC, when a new eden region is allocated, etc.) we store
// them here so that we can easily report them when needed and not
// have to recalculate them every time.
size_t _overall_reserved;
size_t _overall_committed;
size_t _overall_used;
uint _young_region_num;
size_t _young_gen_committed;
size_t _eden_committed;
size_t _eden_used;
size_t _survivor_committed;
size_t _survivor_used;
size_t _old_gen_committed;
size_t _old_committed;
size_t _old_used;
size_t _eden_space_committed;
size_t _eden_space_used;
size_t _survivor_space_committed;
size_t _survivor_space_used;
size_t _old_gen_used;
// It returns x - y if x > y, 0 otherwise.
// As described in the comment above, some of the inputs to the
@ -178,11 +190,16 @@ class G1MonitoringSupport : public CHeapObj<mtGC> {
// Recalculate all the sizes.
void recalculate_sizes();
// Recalculate only what's necessary when a new eden region is allocated.
void recalculate_eden_size();
public:
public:
G1MonitoringSupport(G1CollectedHeap* g1h);
~G1MonitoringSupport();
void initialize_serviceability();
GrowableArray<GCMemoryManager*> memory_managers();
GrowableArray<MemoryPool*> memory_pools();
// Unfortunately, the jstat tool assumes that no space has 0
// capacity. In our case, given that each space is logical, it's
@ -202,73 +219,35 @@ class G1MonitoringSupport : public CHeapObj<mtGC> {
// Recalculate all the sizes from scratch and update all the jstat
// counters accordingly.
void update_sizes();
// Recalculate only what's necessary when a new eden region is
// allocated and update any jstat counters that need to be updated.
void update_eden_size();
CollectorCounters* incremental_collection_counters() {
return _incremental_collection_counters;
}
CollectorCounters* full_collection_counters() {
return _full_collection_counters;
}
CollectorCounters* conc_collection_counters() {
return _conc_collection_counters;
}
GenerationCounters* young_collection_counters() {
return _young_collection_counters;
}
GenerationCounters* old_collection_counters() {
return _old_collection_counters;
}
HSpaceCounters* old_space_counters() { return _old_space_counters; }
HSpaceCounters* eden_counters() { return _eden_counters; }
HSpaceCounters* from_counters() { return _from_counters; }
HSpaceCounters* to_counters() { return _to_counters; }
// Monitoring support used by
// MemoryService
// jstat counters
// Tracing
size_t overall_reserved() { return _overall_reserved; }
size_t overall_committed() { return _overall_committed; }
size_t overall_used() { return _overall_used; }
size_t young_gen_committed() { return _young_gen_committed; }
size_t young_gen_committed() { return _young_gen_committed; }
size_t young_gen_max() { return overall_reserved(); }
size_t eden_space_committed() { return _eden_committed; }
size_t eden_space_used() { return _eden_used; }
size_t survivor_space_committed() { return _survivor_committed; }
size_t survivor_space_used() { return _survivor_used; }
size_t eden_space_committed() { return _eden_space_committed; }
size_t eden_space_used() { return _eden_space_used; }
size_t survivor_space_committed() { return _survivor_space_committed; }
size_t survivor_space_used() { return _survivor_space_used; }
size_t old_gen_committed() { return old_space_committed(); }
size_t old_gen_max() { return overall_reserved(); }
size_t old_space_committed() { return _old_committed; }
size_t old_space_used() { return _old_used; }
size_t old_gen_committed() { return _old_gen_committed; }
size_t old_gen_used() { return _old_gen_used; }
};
class G1GenerationCounters: public GenerationCounters {
protected:
G1MonitoringSupport* _g1mm;
// Scope object for java.lang.management support.
class G1MonitoringScope : public StackObj {
TraceCollectorStats _tcs;
TraceMemoryManagerStats _tms;
public:
G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
size_t curr_capacity);
};
class G1YoungGenerationCounters: public G1GenerationCounters {
public:
G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
virtual void update_all();
};
class G1OldGenerationCounters: public G1GenerationCounters {
public:
G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
virtual void update_all();
G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected);
};
#endif // SHARE_VM_GC_G1_G1MONITORINGSUPPORT_HPP

View File

@ -132,7 +132,7 @@ private:
virtual bool do_heap_region(HeapRegion* r) {
uint hrm_index = r->hrm_index();
if (!r->in_collection_set() && r->is_old_or_humongous()) {
if (!r->in_collection_set() && r->is_old_or_humongous_or_archive()) {
_scan_top[hrm_index] = r->top();
} else {
_scan_top[hrm_index] = r->bottom();
@ -571,7 +571,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// In the normal (non-stale) case, the synchronization between the
// enqueueing of the card and processing it here will have ensured
// we see the up-to-date region type here.
if (!r->is_old_or_humongous()) {
if (!r->is_old_or_humongous_or_archive()) {
return;
}
@ -600,7 +600,7 @@ void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
// Check whether the region formerly in the cache should be
// ignored, as discussed earlier for the original card. The
// region could have been freed while in the cache.
if (!r->is_old_or_humongous()) {
if (!r->is_old_or_humongous_or_archive()) {
return;
}
} // Else we still have the original card.

View File

@ -226,6 +226,7 @@ private:
RegionTypeCounter _humongous;
RegionTypeCounter _free;
RegionTypeCounter _old;
RegionTypeCounter _archive;
RegionTypeCounter _all;
size_t _max_rs_mem_sz;
@ -248,7 +249,7 @@ private:
public:
HRRSStatsIter() : _young("Young"), _humongous("Humongous"),
_free("Free"), _old("Old"), _all("All"),
_free("Free"), _old("Old"), _archive("Archive"), _all("All"),
_max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL),
_max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL)
{}
@ -280,6 +281,8 @@ public:
current = &_humongous;
} else if (r->is_old()) {
current = &_old;
} else if (r->is_archive()) {
current = &_archive;
} else {
ShouldNotReachHere();
}
@ -290,7 +293,7 @@ public:
}
void print_summary_on(outputStream* out) {
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, &_archive, NULL };
out->print_cr(" Current rem set statistics");
out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT "%s."

View File

@ -141,8 +141,9 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by
void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
if (r->is_old_or_humongous()) {
if (r->is_old_or_humongous_or_archive()) {
if (r->rem_set()->is_updating()) {
assert(!r->is_archive(), "Archive region %u with remembered set", r->hrm_index());
r->rem_set()->set_state_complete();
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();

View File

@ -27,7 +27,7 @@
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "oops/oop.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

View File

@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
#define SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/satbMarkQueue.hpp"
class G1CollectedHeap;
class JavaThread;

View File

@ -26,7 +26,7 @@
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/satbMarkQueue.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"

View File

@ -426,6 +426,8 @@ class HeapRegion: public G1ContiguousSpace {
bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); }
// A pinned region contains objects which are not moved by garbage collections.
// Humongous regions and archive regions are pinned.
bool is_pinned() const { return _type.is_pinned(); }

Some files were not shown because too many files have changed in this diff Show More