Merge
commit f8d533e6b6
@@ -67,7 +67,6 @@ MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
# not justified.
LFLAGS_QIPA=

G_SUFFIX = _g
VERSION = optimized
SYSDEFS += -DASSERT -DFASTDEBUG
PICFLAGS = DEFAULT

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;

@@ -115,7 +115,6 @@
_JVM_GetClassDeclaredMethods
_JVM_GetClassFieldsCount
_JVM_GetClassInterfaces
_JVM_GetClassLoader
_JVM_GetClassMethodsCount
_JVM_GetClassModifiers
_JVM_GetClassName

@@ -115,7 +115,6 @@
_JVM_GetClassDeclaredMethods
_JVM_GetClassFieldsCount
_JVM_GetClassInterfaces
_JVM_GetClassLoader
_JVM_GetClassMethodsCount
_JVM_GetClassModifiers
_JVM_GetClassName

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;

@@ -117,7 +117,6 @@ SUNWprivate_1.1 {
JVM_GetClassDeclaredMethods;
JVM_GetClassFieldsCount;
JVM_GetClassInterfaces;
JVM_GetClassLoader;
JVM_GetClassMethodsCount;
JVM_GetClassModifiers;
JVM_GetClassName;
@@ -268,8 +268,35 @@ class Assembler : public AbstractAssembler {

ISEL_OPCODE = (31u << OPCODE_SHIFT | 15u << 1),

MTLR_OPCODE = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
MFLR_OPCODE = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
// Special purpose registers
MTSPR_OPCODE = (31u << OPCODE_SHIFT | 467u << 1),
MFSPR_OPCODE = (31u << OPCODE_SHIFT | 339u << 1),

MTXER_OPCODE = (MTSPR_OPCODE | 1 << SPR_0_4_SHIFT),
MFXER_OPCODE = (MFSPR_OPCODE | 1 << SPR_0_4_SHIFT),

MTDSCR_OPCODE = (MTSPR_OPCODE | 3 << SPR_0_4_SHIFT),
MFDSCR_OPCODE = (MFSPR_OPCODE | 3 << SPR_0_4_SHIFT),

MTLR_OPCODE = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT),
MFLR_OPCODE = (MFSPR_OPCODE | 8 << SPR_0_4_SHIFT),

MTCTR_OPCODE = (MTSPR_OPCODE | 9 << SPR_0_4_SHIFT),
MFCTR_OPCODE = (MFSPR_OPCODE | 9 << SPR_0_4_SHIFT),

MTTFHAR_OPCODE = (MTSPR_OPCODE | 128 << SPR_0_4_SHIFT),
MFTFHAR_OPCODE = (MFSPR_OPCODE | 128 << SPR_0_4_SHIFT),
MTTFIAR_OPCODE = (MTSPR_OPCODE | 129 << SPR_0_4_SHIFT),
MFTFIAR_OPCODE = (MFSPR_OPCODE | 129 << SPR_0_4_SHIFT),
MTTEXASR_OPCODE = (MTSPR_OPCODE | 130 << SPR_0_4_SHIFT),
MFTEXASR_OPCODE = (MFSPR_OPCODE | 130 << SPR_0_4_SHIFT),
MTTEXASRU_OPCODE = (MTSPR_OPCODE | 131 << SPR_0_4_SHIFT),
MFTEXASRU_OPCODE = (MFSPR_OPCODE | 131 << SPR_0_4_SHIFT),

MTVRSAVE_OPCODE = (MTSPR_OPCODE | 256 << SPR_0_4_SHIFT),
MFVRSAVE_OPCODE = (MFSPR_OPCODE | 256 << SPR_0_4_SHIFT),

MFTB_OPCODE = (MFSPR_OPCODE | 268 << SPR_0_4_SHIFT),

MTCRF_OPCODE = (31u << OPCODE_SHIFT | 144u << 1),
MFCR_OPCODE = (31u << OPCODE_SHIFT | 19u << 1),
@@ -291,9 +318,6 @@ class Assembler : public AbstractAssembler {

// CTR-related opcodes
BCCTR_OPCODE = (19u << OPCODE_SHIFT | 528u << 1),
MTCTR_OPCODE = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
MFCTR_OPCODE = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),


LWZ_OPCODE = (32u << OPCODE_SHIFT),
LWZX_OPCODE = (31u << OPCODE_SHIFT | 23u << 1),
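The mtspr/mfspr family above encodes the 10-bit SPR number with its two 5-bit halves swapped in the instruction word, and SPR_0_4_SHIFT places the low half. A minimal C++ sketch of what the hunk changes, assuming illustrative shift values (the real constants are defined elsewhere in assembler_ppc.hpp): the new composed spelling MTLR_OPCODE = (MTSPR_OPCODE | 8 << SPR_0_4_SHIFT) yields exactly the same word as the old literal spelling.

#include <cassert>
#include <cstdint>

// Assumed field positions -- illustrative only.
enum { OPCODE_SHIFT = 26, SPR_0_4_SHIFT = 16 };

int main() {
  const uint32_t MTSPR_OPCODE = 31u << OPCODE_SHIFT | 467u << 1;
  // Old spelling: every field written out literally.
  uint32_t mtlr_old = 31u << OPCODE_SHIFT | 467u << 1 | 8u << SPR_0_4_SHIFT;
  // New spelling: compose from the generic mtspr encoding (LR is SPR 8).
  uint32_t mtlr_new = MTSPR_OPCODE | 8u << SPR_0_4_SHIFT;
  assert(mtlr_old == mtlr_new);  // identical instruction word
  return 0;
}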
@@ -585,6 +609,37 @@ class Assembler : public AbstractAssembler {
MTVSCR_OPCODE = (4u << OPCODE_SHIFT | 1604u ),
MFVSCR_OPCODE = (4u << OPCODE_SHIFT | 1540u ),

// AES (introduced with Power 8)
VCIPHER_OPCODE = (4u << OPCODE_SHIFT | 1288u),
VCIPHERLAST_OPCODE = (4u << OPCODE_SHIFT | 1289u),
VNCIPHER_OPCODE = (4u << OPCODE_SHIFT | 1352u),
VNCIPHERLAST_OPCODE = (4u << OPCODE_SHIFT | 1353u),
VSBOX_OPCODE = (4u << OPCODE_SHIFT | 1480u),

// SHA (introduced with Power 8)
VSHASIGMAD_OPCODE = (4u << OPCODE_SHIFT | 1730u),
VSHASIGMAW_OPCODE = (4u << OPCODE_SHIFT | 1666u),

// Vector Binary Polynomial Multiplication (introduced with Power 8)
VPMSUMB_OPCODE = (4u << OPCODE_SHIFT | 1032u),
VPMSUMD_OPCODE = (4u << OPCODE_SHIFT | 1224u),
VPMSUMH_OPCODE = (4u << OPCODE_SHIFT | 1096u),
VPMSUMW_OPCODE = (4u << OPCODE_SHIFT | 1160u),

// Vector Permute and Xor (introduced with Power 8)
VPERMXOR_OPCODE = (4u << OPCODE_SHIFT | 45u),

// Transactional Memory instructions (introduced with Power 8)
TBEGIN_OPCODE = (31u << OPCODE_SHIFT | 654u << 1),
TEND_OPCODE = (31u << OPCODE_SHIFT | 686u << 1),
TABORT_OPCODE = (31u << OPCODE_SHIFT | 910u << 1),
TABORTWC_OPCODE = (31u << OPCODE_SHIFT | 782u << 1),
TABORTWCI_OPCODE = (31u << OPCODE_SHIFT | 846u << 1),
TABORTDC_OPCODE = (31u << OPCODE_SHIFT | 814u << 1),
TABORTDCI_OPCODE = (31u << OPCODE_SHIFT | 878u << 1),
TSR_OPCODE = (31u << OPCODE_SHIFT | 750u << 1),
TCHECK_OPCODE = (31u << OPCODE_SHIFT | 718u << 1),

// Icache and dcache related instructions
DCBA_OPCODE = (31u << OPCODE_SHIFT | 758u << 1),
DCBZ_OPCODE = (31u << OPCODE_SHIFT | 1014u << 1),
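Two encoding shapes appear in the hunk above: vector opcodes with primary opcode 4 and the extended opcode in the low bits (e.g. VCIPHER), and fixed-point opcodes with primary opcode 31 and a 10-bit extended opcode shifted left by one (e.g. TBEGIN, where instruction bit 31 is left free for the record bit). A small self-contained C++ sketch that pulls both fields back out of the encoded words:

#include <cstdint>
#include <cstdio>

enum { OPCODE_SHIFT = 26 }; // primary opcode lives in the top 6 bits

int main() {
  uint32_t vcipher = 4u  << OPCODE_SHIFT | 1288u;     // vector form
  uint32_t tbegin  = 31u << OPCODE_SHIFT | 654u << 1; // X-form
  std::printf("vcipher: primary %u, xo %u\n",
              vcipher >> OPCODE_SHIFT, vcipher & 0x7ffu);      // 4, 1288
  std::printf("tbegin:  primary %u, xo %u\n",
              tbegin >> OPCODE_SHIFT, (tbegin >> 1) & 0x3ffu); // 31, 654
  return 0;
}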
@@ -1420,6 +1475,25 @@ class Assembler : public AbstractAssembler {
inline void mcrf( ConditionRegister crd, ConditionRegister cra);
inline void mtcr( Register s);

// Special purpose registers
// Exception Register
inline void mtxer(Register s1);
inline void mfxer(Register d);
// Vector Register Save Register
inline void mtvrsave(Register s1);
inline void mfvrsave(Register d);
// Timebase
inline void mftb(Register d);
// Introduced with Power 8:
// Data Stream Control Register
inline void mtdscr(Register s1);
inline void mfdscr(Register d );
// Transactional Memory Registers
inline void mftfhar(Register d);
inline void mftfiar(Register d);
inline void mftexasr(Register d);
inline void mftexasru(Register d);

// PPC 1, section 2.4.1 Branch Instructions
inline void b( address a, relocInfo::relocType rt = relocInfo::none);
inline void b( Label& L);
@@ -1860,6 +1934,39 @@ class Assembler : public AbstractAssembler {
inline void mtvscr( VectorRegister b);
inline void mfvscr( VectorRegister d);

// AES (introduced with Power 8)
inline void vcipher( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vncipher( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b);
inline void vsbox( VectorRegister d, VectorRegister a);

// SHA (introduced with Power 8)
// Not yet implemented.

// Vector Binary Polynomial Multiplication (introduced with Power 8)
inline void vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vpmsumd( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vpmsumh( VectorRegister d, VectorRegister a, VectorRegister b);
inline void vpmsumw( VectorRegister d, VectorRegister a, VectorRegister b);

// Vector Permute and Xor (introduced with Power 8)
inline void vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);

// Transactional Memory instructions (introduced with Power 8)
inline void tbegin_(); // R=0
inline void tbeginrot_(); // R=1 Rollback-Only Transaction
inline void tend_(); // A=0
inline void tendall_(); // A=1
inline void tabort_(Register a);
inline void tabortwc_(int t, Register a, Register b);
inline void tabortwci_(int t, Register a, int si);
inline void tabortdc_(int t, Register a, Register b);
inline void tabortdci_(int t, Register a, int si);
inline void tsuspend_(); // tsr with L=0
inline void tresume_(); // tsr with L=1
inline void tcheck(int f);

// The following encoders use r0 as second operand. These instructions
// read r0 as '0'.
inline void lwzx( Register d, Register s2);

@@ -312,6 +312,25 @@ inline void Assembler::mcrf( ConditionRegister crd, ConditionRegister cra)
{ emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
inline void Assembler::mtcr( Register s) { Assembler::mtcrf(0xff, s); }

// Special purpose registers
// Exception Register
inline void Assembler::mtxer(Register s1) { emit_int32(MTXER_OPCODE | rs(s1)); }
inline void Assembler::mfxer(Register d ) { emit_int32(MFXER_OPCODE | rt(d)); }
// Vector Register Save Register
inline void Assembler::mtvrsave(Register s1) { emit_int32(MTVRSAVE_OPCODE | rs(s1)); }
inline void Assembler::mfvrsave(Register d ) { emit_int32(MFVRSAVE_OPCODE | rt(d)); }
// Timebase
inline void Assembler::mftb(Register d ) { emit_int32(MFTB_OPCODE | rt(d)); }
// Introduced with Power 8:
// Data Stream Control Register
inline void Assembler::mtdscr(Register s1) { emit_int32(MTDSCR_OPCODE | rs(s1)); }
inline void Assembler::mfdscr(Register d ) { emit_int32(MFDSCR_OPCODE | rt(d)); }
// Transactional Memory Registers
inline void Assembler::mftfhar(Register d ) { emit_int32(MFTFHAR_OPCODE | rt(d)); }
inline void Assembler::mftfiar(Register d ) { emit_int32(MFTFIAR_OPCODE | rt(d)); }
inline void Assembler::mftexasr(Register d ) { emit_int32(MFTEXASR_OPCODE | rt(d)); }
inline void Assembler::mftexasru(Register d ) { emit_int32(MFTEXASRU_OPCODE | rt(d)); }

// SAP JVM 2006-02-13 PPC branch instruction.
// PPC 1, section 2.4.1 Branch Instructions
inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
@@ -735,6 +754,39 @@ inline void Assembler::vsrah( VectorRegister d, VectorRegister a, VectorRegist
inline void Assembler::mtvscr( VectorRegister b) { emit_int32( MTVSCR_OPCODE | vrb(b)); }
inline void Assembler::mfvscr( VectorRegister d) { emit_int32( MFVSCR_OPCODE | vrt(d)); }

// AES (introduced with Power 8)
inline void Assembler::vcipher( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHER_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vcipherlast( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vncipher( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHER_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vncipherlast(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNCIPHERLAST_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vsbox( VectorRegister d, VectorRegister a) { emit_int32( VSBOX_OPCODE | vrt(d) | vra(a) ); }

// SHA (introduced with Power 8)
// Not yet implemented.

// Vector Binary Polynomial Multiplication (introduced with Power 8)
inline void Assembler::vpmsumb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpmsumd( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMD_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpmsumh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
inline void Assembler::vpmsumw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b)); }

// Vector Permute and Xor (introduced with Power 8)
inline void Assembler::vpermxor( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VPMSUMW_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }

// Transactional Memory instructions (introduced with Power 8)
inline void Assembler::tbegin_() { emit_int32( TBEGIN_OPCODE | rc(1)); }
inline void Assembler::tbeginrot_() { emit_int32( TBEGIN_OPCODE | /*R=1*/ 1u << (31-10) | rc(1)); }
inline void Assembler::tend_() { emit_int32( TEND_OPCODE | rc(1)); }
inline void Assembler::tendall_() { emit_int32( TEND_OPCODE | /*A=1*/ 1u << (31-6) | rc(1)); }
inline void Assembler::tabort_(Register a) { emit_int32( TABORT_OPCODE | ra(a) | rc(1)); }
inline void Assembler::tabortwc_(int t, Register a, Register b) { emit_int32( TABORTWC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::tabortwci_(int t, Register a, int si) { emit_int32( TABORTWCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
inline void Assembler::tabortdc_(int t, Register a, Register b) { emit_int32( TABORTDC_OPCODE | to(t) | ra(a) | rb(b) | rc(1)); }
inline void Assembler::tabortdci_(int t, Register a, int si) { emit_int32( TABORTDCI_OPCODE | to(t) | ra(a) | sh1620(si) | rc(1)); }
inline void Assembler::tsuspend_() { emit_int32( TSR_OPCODE | rc(1)); }
inline void Assembler::tresume_() { emit_int32( TSR_OPCODE | /*L=1*/ 1u << (31-10) | rc(1)); }
inline void Assembler::tcheck(int f) { emit_int32( TCHECK_OPCODE | bf(f)); }

// ra0 version
inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
inline void Assembler::lwz( Register d, int si16 ) { emit_int32( LWZ_OPCODE | rt(d) | d1(si16));}
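One detail worth flagging in the encoder hunk above: vpermxor() ORs its register fields into VPMSUMW_OPCODE rather than the VPERMXOR_OPCODE constant this same commit defines, which looks like a copy-paste slip (the two carry different extended opcodes, 1160 versus 45). A standalone C++ sketch of the observation, with the opcode arithmetic inlined:

#include <cstdint>
#include <cstdio>

enum { OPCODE_SHIFT = 26 };

int main() {
  uint32_t VPMSUMW_OPCODE  = 4u << OPCODE_SHIFT | 1160u;
  uint32_t VPERMXOR_OPCODE = 4u << OPCODE_SHIFT | 45u;
  // The committed vpermxor() emits VPMSUMW_OPCODE | vrt | vra | vrb | vrc,
  // so the instruction word carries vpmsumw's extended opcode.
  std::printf("encodings equal? %s\n",
              VPMSUMW_OPCODE == VPERMXOR_OPCODE ? "yes" : "no"); // "no"
  return 0;
}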
@@ -37,6 +37,8 @@ const int StackAlignmentInBytes = 16;
// signatures accordingly.
const bool CCallingConventionRequiresIntsAsLongs = true;

#define SUPPORTS_NATIVE_CX8

// The PPC CPUs are NOT multiple-copy-atomic.
#define CPU_NOT_MULTIPLE_COPY_ATOMIC

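CPU_NOT_MULTIPLE_COPY_ATOMIC records that on PPC two stores made by different processors may become visible to different observers in different orders (the classic IRIW litmus test), so the VM must use a full sync where Java volatile semantics demand a single global store order. A hedged C++ sketch of the litmus shape being guarded against -- illustrative only, not code from this commit:

#include <atomic>
#include <thread>

// IRIW: two writers, two readers. On a multiple-copy-atomic CPU the
// outcome r1==1,r2==0 together with r3==1,r4==0 is impossible; on PPC
// it is possible unless sequentially consistent (full-sync) accesses
// are used -- which is what the define above makes the VM emit.
std::atomic<int> x{0}, y{0};
int r1, r2, r3, r4;

int main() {
  std::thread w1([] { x.store(1, std::memory_order_seq_cst); });
  std::thread w2([] { y.store(1, std::memory_order_seq_cst); });
  std::thread ra([] { r1 = x.load(std::memory_order_seq_cst);
                      r2 = y.load(std::memory_order_seq_cst); });
  std::thread rb([] { r3 = y.load(std::memory_order_seq_cst);
                      r4 = x.load(std::memory_order_seq_cst); });
  w1.join(); w2.join(); ra.join(); rb.join();
  return 0;
}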
@@ -25,7 +25,6 @@


#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interp_masm_ppc_64.hpp"
#include "interpreter/interpreterRuntime.hpp"

@@ -24,7 +24,6 @@
*/

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"

@@ -2366,7 +2366,7 @@ void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_v
#endif // INCLUDE_ALL_GCS

// Values for last_Java_pc, and last_Java_sp must comply to the rules
// in frame_ppc64.hpp.
// in frame_ppc.hpp.
void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
// Always set last_Java_pc and flags first because once last_Java_sp
// is visible has_last_Java_frame is true and users will look at the
@@ -2493,6 +2493,7 @@ int MacroAssembler::instr_size_for_decode_klass_not_null() {
}

void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
assert(dst != R0, "Dst reg may not be R0, as R0 is used here.");
if (src == noreg) src = dst;
Register shifted_src = src;
if (Universe::narrow_klass_shift() != 0 ||
@@ -2527,14 +2528,11 @@ void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src)

void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
if (Universe::heap() != NULL) {
if (Universe::narrow_oop_base() == NULL) {
Assembler::xorr(R30, R30, R30);
} else {
load_const(R30, Universe::narrow_ptrs_base(), tmp);
}
load_const_optimized(R30, Universe::narrow_ptrs_base(), tmp);
} else {
load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
ld(R30, 0, R30);
// Heap not yet allocated. Load indirectly.
int simm16_offset = load_const_optimized(R30, Universe::narrow_ptrs_base_addr(), tmp, true);
ld(R30, simm16_offset, R30);
}
}

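reinit_heapbase now funnels both cases through load_const_optimized, which emits the shortest materialization sequence the actual constant value allows instead of load_const's fixed full-width sequence, and in the indirect case folds the low 16 bits into the following ld displacement (the true argument and the returned simm16_offset). A hedged C++ sketch of the instruction-count idea; the function names mirror the diff but the bodies here are illustrative, not the HotSpot implementation:

#include <cstdint>
#include <cstdio>

// Roughly how many PPC instructions each strategy needs.
int load_const_fixed(uint64_t) { return 5; } // lis/ori/rldicr/oris/ori, always
int load_const_optimized(uint64_t v) {
  if ((int64_t)v == (int16_t)v) return 1;    // li (sign-extending)
  if ((int64_t)v == (int32_t)v) return 2;    // lis + ori
  return 5;                                  // full 64-bit sequence
}

int main() {
  std::printf("%d %d %d\n",
              load_const_optimized(42),            // 1
              load_const_optimized(0x12340000u),   // 2
              load_const_optimized(0x123456789aULL)); // 5
  return 0;
}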
@@ -1249,6 +1249,7 @@ EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address en

// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
if (Compile::current()->env()->failing()) { return offsets; } // Code cache may be full.
__ relocate(rtype);
}

@@ -1410,7 +1411,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
while (bang_offset <= bang_end) {
// Need at least one stack bang at end of shadow zone.

// Again I had to copy code, this time from assembler_ppc64.cpp,
// Again I had to copy code, this time from assembler_ppc.cpp,
// bang_stack_with_offset - see there for comments.

// Stack grows down, caller passes positive offset.
@@ -2000,7 +2001,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {

// Inline_cache contains a klass.
Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
Register receiver_klass = R0; // tmp
Register receiver_klass = R12_scratch2; // tmp

assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
assert(R11_scratch1 == R11, "need prologue scratch register");
@@ -3484,6 +3485,7 @@ encode %{

// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
if (Compile::current()->env()->failing()) { return; } // Code cache may be full.
__ relocate(_optimized_virtual ?
relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
}
@@ -3527,6 +3529,7 @@ encode %{

// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
if (ra_->C->env()->failing()) { return; } // Code cache may be full.
assert(_optimized_virtual, "methodHandle call should be a virtual call");
__ relocate(relocInfo::opt_virtual_call_type);
}
@@ -3577,9 +3580,7 @@ encode %{
const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
CallStubImpl::emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());

if (ra_->C->env()->failing())
return;
if (ra_->C->env()->failing()) { return; } // Code cache may be full.

// Build relocation at call site with ic position as data.
assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
@@ -5638,19 +5639,6 @@ instruct loadNKlass(iRegNdst dst, memory mem) %{
ins_pipe(pipe_class_memory);
%}

//// Load compressed klass and decode it if narrow_klass_shift == 0.
//// TODO: will narrow_klass_shift ever be 0?
//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
// match(Set dst (DecodeNKlass (LoadNKlass mem)));
// predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*/);
// ins_cost(MEMORY_REF_COST);
//
// format %{ "LWZ $dst, $mem \t// DecodeNKlass (unscaled)" %}
// size(4);
// ins_encode( enc_lwz(dst, mem) );
// ins_pipe(pipe_class_memory);
//%}

// Load Klass Pointer
instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
match(Set dst (LoadKlass mem));
@@ -6070,11 +6058,15 @@ instruct loadConN_Ex(iRegNdst dst, immN src) %{
%}
%}

instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
// We have seen a safepoint between the hi and lo parts, and this node was handled
// as an oop. Therefore this needs a match rule so that build_oop_map knows this is
// not a narrow oop.
instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
match(Set dst src);
effect(DEF dst, USE src);
ins_cost(DEFAULT_COST);

format %{ "LIS $dst, $src \t// narrow oop hi" %}
format %{ "LIS $dst, $src \t// narrow klass hi" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_addis);
@@ -6084,6 +6076,21 @@ instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
ins_pipe(pipe_class_default);
%}

// As loadConNKlass_hi this must be recognized as narrow klass, not oop!
instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
match(Set dst src1);
effect(TEMP src2);
ins_cost(DEFAULT_COST);

format %{ "MASK $dst, $src2, 0xFFFFFFFF" %} // mask
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
__ clrldi($dst$$Register, $src2$$Register, 0x20);
%}
ins_pipe(pipe_class_default);
%}

// This needs a match rule so that build_oop_map knows this is
// not a narrow oop.
instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
@@ -6091,10 +6098,10 @@ instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
effect(TEMP src2);
ins_cost(DEFAULT_COST);

format %{ "ADDI $dst, $src1, $src2 \t// narrow oop lo" %}
format %{ "ORI $dst, $src1, $src2 \t// narrow klass lo" %}
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_addi);
// TODO: PPC port $archOpcode(ppc64Opcode_ori);
intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
@@ -6125,10 +6132,11 @@ instruct loadConNKlass_Ex(iRegNdst dst, immNKlass src) %{
MachNode *m2 = m1;
if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
// Value might be 1-extended. Mask out these bits.
m2 = new clearMs32bNode();
m2 = new loadConNKlass_maskNode();
m2->add_req(NULL, m1);
m2->_opnds[0] = op_dst;
m2->_opnds[1] = op_dst;
m2->_opnds[1] = op_src;
m2->_opnds[2] = op_dst;
ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
nodes->push(m2);
}
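The hi/mask/lo expansion above exists because lis (addis) loads its 16-bit immediate shifted left by 16 and sign-extended to 64 bits: when bit 31 of the narrow-klass constant is set, the upper 32 bits of the register come out all ones ("1-extended") and must be cleared with clrldi before ori fills in the low half. A small C++ sketch of that arithmetic:

#include <cassert>
#include <cstdint>

// Simulate materializing a 32-bit narrow-klass constant via
// lis (sign-extending) + clrldi + ori, as in the expanded nodes above.
int main() {
  uint32_t nk = 0x89ab0123u;                                  // bit 31 set
  int64_t  after_lis  = (int64_t)(int32_t)(nk & 0xffff0000u); // sign-extends
  uint64_t after_mask = (uint64_t)after_lis & 0xffffffffull;  // clrldi ..., 0x20
  uint64_t after_ori  = after_mask | (nk & 0xffffu);
  assert(after_lis < 0);      // 1-extended: why the mask node is needed
  assert(after_ori == nk);    // full constant recovered
  return 0;
}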
@@ -6973,7 +6981,7 @@ instruct encodePKlass_32GAligned(iRegNdst dst, iRegPsrc src) %{
size(4);
ins_encode %{
// TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
__ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
__ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
%}
ins_pipe(pipe_class_default);
%}

@@ -24,7 +24,6 @@
*/

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
@@ -39,9 +38,6 @@
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/top.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#include "runtime/thread.inline.hpp"

#define __ _masm->
@@ -216,7 +212,7 @@ class StubGenerator: public StubCodeGenerator {
{
BLOCK_COMMENT("Call frame manager or native entry.");
// Call frame manager or native entry.
Register r_new_arg_entry = R14; // PPC_state;
Register r_new_arg_entry = R14;
assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
r_arg_method, r_arg_thread);

@@ -353,7 +353,6 @@ void TemplateTable::ldc(bool wide) {
__ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
__ bne(CCR0, notInt);
__ isync(); // Order load of constant wrt. tags.
__ lwax(R17_tos, Rcpool, Rscratch1);
__ push(itos);
__ b(exit);
@@ -365,7 +364,6 @@ void TemplateTable::ldc(bool wide) {
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
__ asm_assert_eq("unexpected type", 0x8765);
#endif
__ isync(); // Order load of constant wrt. tags.
__ lfsx(F15_ftos, Rcpool, Rscratch1);
__ push(ftos);

@@ -424,13 +422,11 @@ void TemplateTable::ldc2_w() {
// Check out Conversions.java for an example.
// Also ConstantPool::header_size() is 20, which makes it very difficult
// to double-align double on the constant pool. SG, 11/7/97
__ isync(); // Order load of constant wrt. tags.
__ lfdx(F15_ftos, Rcpool, Rindex);
__ push(dtos);
__ b(Lexit);

__ bind(Llong);
__ isync(); // Order load of constant wrt. tags.
__ ldx(R17_tos, Rcpool, Rindex);
__ push(ltos);

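The isync lines dropped from ldc/ldc2_w above implemented an acquire: on PPC, a conditional branch (or an artificial dependency) on the tag load followed by isync keeps the constant load from completing before the tag check (the 7-to-6 hunk counts show these as the removed lines). A hedged, PPC-only C++ sketch of the classic dependency-plus-isync idiom, purely to illustrate the pattern the interpreter was using:

// Illustrative only; HotSpot emits this through its assembler, not asm().
// twi 0,reg,0 never traps but creates a dependency on the loaded value;
// isync then discards any speculatively started later instructions.
inline int load_with_tag_acquire(const volatile int* tag,
                                 const volatile int* slot) {
  int t = *tag;
  __asm__ __volatile__("twi 0,%0,0\n\tisync" : : "r"(t) : "memory");
  return *slot; // ordered after the tag load
}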
@@ -1020,7 +1020,3 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {

unmap_shared(addr, bytes);
}

char* PerfMemory::backing_store_filename() {
return backing_store_file_name;
}

@@ -1043,7 +1043,3 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {

unmap_shared(addr, bytes);
}

char* PerfMemory::backing_store_filename() {
return backing_store_file_name;
}

@@ -1049,7 +1049,3 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {

unmap_shared(addr, bytes);
}

char* PerfMemory::backing_store_filename() {
return backing_store_file_name;
}

@@ -1068,7 +1068,3 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {

unmap_shared(addr, bytes);
}

char* PerfMemory::backing_store_filename() {
return backing_store_file_name;
}

@@ -1846,7 +1846,3 @@ void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
remove_file_mapping(addr);
}
}

char* PerfMemory::backing_store_filename() {
return sharedmem_fileName;
}

@@ -47,4 +47,4 @@ inline void Prefetch::write(void *loc, intx interval) {
);
}

#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_OJDKPPC_HPP
#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP

@@ -2069,14 +2069,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
LIR_Opr base_op = base.result();
LIR_Opr index_op = idx.result();
#ifndef _LP64
if (x->base()->type()->tag() == longTag) {
if (base_op->type() == T_LONG) {
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, base.result(), base_op);
}
if (x->has_index()) {
if (x->index()->type()->tag() == longTag) {
if (index_op->type() == T_LONG) {
LIR_Opr long_index_op = index_op;
if (x->index()->type()->is_constant()) {
if (index_op->is_constant()) {
long_index_op = new_register(T_LONG);
__ move(index_op, long_index_op);
}
@@ -2091,14 +2091,14 @@
assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
#else
if (x->has_index()) {
if (x->index()->type()->tag() == intTag) {
if (!x->index()->type()->is_constant()) {
if (index_op->type() == T_INT) {
if (!index_op->is_constant()) {
index_op = new_register(T_LONG);
__ convert(Bytecodes::_i2l, idx.result(), index_op);
}
} else {
assert(x->index()->type()->tag() == longTag, "must be");
if (x->index()->type()->is_constant()) {
assert(index_op->type() == T_LONG, "must be");
if (index_op->is_constant()) {
index_op = new_register(T_LONG);
__ move(idx.result(), index_op);
}
@@ -2179,12 +2179,12 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
LIR_Opr index_op = idx.result();

#ifndef _LP64
if (x->base()->type()->tag() == longTag) {
if (base_op->type() == T_LONG) {
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, base.result(), base_op);
}
if (x->has_index()) {
if (x->index()->type()->tag() == longTag) {
if (index_op->type() == T_LONG) {
index_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, idx.result(), index_op);
}
@@ -2194,7 +2194,7 @@
assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
#else
if (x->has_index()) {
if (x->index()->type()->tag() == intTag) {
if (index_op->type() == T_INT) {
index_op = new_register(T_LONG);
__ convert(Bytecodes::_i2l, idx.result(), index_op);
}

@@ -98,6 +98,14 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul
HandleMark hm;
ResourceMark rm(THREAD);

if (!is_eligible_for_verification(klass, should_verify_class)) {
return true;
}

// If the class should be verified, first see if we can use the split
// verifier. If not, or if verification fails and FailOverToOldVerifier
// is set, then call the inference verifier.

Symbol* exception_name = NULL;
const size_t message_buffer_len = klass->name()->utf8_length() + 1024;
char* message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
@@ -105,47 +113,42 @@ bool Verifier::verify(instanceKlassHandle klass, Verifier::Mode mode, bool shoul

const char* klassName = klass->external_name();
bool can_failover = FailOverToOldVerifier &&
klass->major_version() < NOFAILOVER_MAJOR_VERSION;
klass->major_version() < NOFAILOVER_MAJOR_VERSION;

// If the class should be verified, first see if we can use the split
// verifier. If not, or if verification fails and FailOverToOldVerifier
// is set, then call the inference verifier.
if (is_eligible_for_verification(klass, should_verify_class)) {
if (TraceClassInitialization) {
tty->print_cr("Start class verification for: %s", klassName);
}
if (klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
ClassVerifier split_verifier(klass, THREAD);
split_verifier.verify_class(THREAD);
exception_name = split_verifier.result();
if (can_failover && !HAS_PENDING_EXCEPTION &&
(exception_name == vmSymbols::java_lang_VerifyError() ||
exception_name == vmSymbols::java_lang_ClassFormatError())) {
if (TraceClassInitialization || VerboseVerification) {
tty->print_cr(
"Fail over class verification to old verifier for: %s", klassName);
}
exception_name = inference_verify(
klass, message_buffer, message_buffer_len, THREAD);
if (TraceClassInitialization) {
tty->print_cr("Start class verification for: %s", klassName);
}
if (klass->major_version() >= STACKMAP_ATTRIBUTE_MAJOR_VERSION) {
ClassVerifier split_verifier(klass, THREAD);
split_verifier.verify_class(THREAD);
exception_name = split_verifier.result();
if (can_failover && !HAS_PENDING_EXCEPTION &&
(exception_name == vmSymbols::java_lang_VerifyError() ||
exception_name == vmSymbols::java_lang_ClassFormatError())) {
if (TraceClassInitialization || VerboseVerification) {
tty->print_cr(
"Fail over class verification to old verifier for: %s", klassName);
}
if (exception_name != NULL) {
exception_message = split_verifier.exception_message();
}
} else {
exception_name = inference_verify(
klass, message_buffer, message_buffer_len, THREAD);
klass, message_buffer, message_buffer_len, THREAD);
}
if (exception_name != NULL) {
exception_message = split_verifier.exception_message();
}
} else {
exception_name = inference_verify(
klass, message_buffer, message_buffer_len, THREAD);
}

if (TraceClassInitialization || VerboseVerification) {
if (HAS_PENDING_EXCEPTION) {
tty->print("Verification for %s has", klassName);
tty->print_cr(" exception pending %s ",
InstanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
} else if (exception_name != NULL) {
tty->print_cr("Verification for %s failed", klassName);
}
tty->print_cr("End class verification for: %s", klassName);
if (TraceClassInitialization || VerboseVerification) {
if (HAS_PENDING_EXCEPTION) {
tty->print("Verification for %s has", klassName);
tty->print_cr(" exception pending %s ",
InstanceKlass::cast(PENDING_EXCEPTION->klass())->external_name());
} else if (exception_name != NULL) {
tty->print_cr("Verification for %s failed", klassName);
}
tty->print_cr("End class verification for: %s", klassName);
}

if (HAS_PENDING_EXCEPTION) {

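The Verifier::verify change above is a pure control-flow refactor: the is_eligible_for_verification test moves to the top as an early return, so the whole verification body sheds one nesting level (which is why the flattened hunk shows two copies of the body differing only in indentation). A minimal C++ sketch of the same guard-clause transformation, with hypothetical names:

// Before: the happy path is wrapped in a conditional.
bool verify_before(bool eligible) {
  bool ok = true;
  if (eligible) {
    ok = /* ... run the verifier ... */ true;
  }
  return ok;
}

// After: ineligible classes return early; the body un-indents.
bool verify_after(bool eligible) {
  if (!eligible) {
    return true; // nothing to verify
  }
  return /* ... run the verifier ... */ true;
}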
@@ -254,8 +254,7 @@ bool CodeCache::heap_available(int code_blob_type) {
if (!SegmentedCodeCache) {
// No segmentation: use a single code heap
return (code_blob_type == CodeBlobType::All);
} else if ((Arguments::mode() == Arguments::_int) ||
(TieredStopAtLevel == CompLevel_none)) {
} else if (Arguments::mode() == Arguments::_int) {
// Interpreter only: we don't need any method code heaps
return (code_blob_type == CodeBlobType::NonNMethod);
} else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {

@@ -1683,6 +1683,8 @@ protected:
int _failures;
bool _verbose;

HeapRegionClaimer _hrclaimer;

public:
G1ParVerifyFinalCountTask(G1CollectedHeap* g1h,
BitMap* region_bm, BitMap* card_bm,
@@ -1692,19 +1694,8 @@ public:
_actual_region_bm(region_bm), _actual_card_bm(card_bm),
_expected_region_bm(expected_region_bm), _expected_card_bm(expected_card_bm),
_failures(0), _verbose(false),
_n_workers(0) {
_n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
assert(VerifyDuringGC, "don't call this otherwise");

// Use the value already set as the number of active threads
// in the call to run_task().
if (G1CollectedHeap::use_parallel_gc_threads()) {
assert( _g1h->workers()->active_workers() > 0,
"Should have been previously set");
_n_workers = _g1h->workers()->active_workers();
} else {
_n_workers = 1;
}

assert(_expected_card_bm->size() == _actual_card_bm->size(), "sanity");
assert(_expected_region_bm->size() == _actual_region_bm->size(), "sanity");

@@ -1721,10 +1712,7 @@ public:
_verbose);

if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&verify_cl,
worker_id,
_n_workers,
HeapRegion::VerifyCountClaimValue);
_g1h->heap_region_par_iterate(&verify_cl, worker_id, &_hrclaimer);
} else {
_g1h->heap_region_iterate(&verify_cl);
}
@@ -1813,22 +1801,14 @@ protected:
BitMap* _actual_card_bm;

uint _n_workers;
HeapRegionClaimer _hrclaimer;

public:
G1ParFinalCountTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm)
: AbstractGangTask("G1 final counting"),
_g1h(g1h), _cm(_g1h->concurrent_mark()),
_actual_region_bm(region_bm), _actual_card_bm(card_bm),
_n_workers(0) {
// Use the value already set as the number of active threads
// in the call to run_task().
if (G1CollectedHeap::use_parallel_gc_threads()) {
assert( _g1h->workers()->active_workers() > 0,
"Should have been previously set");
_n_workers = _g1h->workers()->active_workers();
} else {
_n_workers = 1;
}
_n_workers(_g1h->workers()->active_workers()), _hrclaimer(_n_workers) {
}

void work(uint worker_id) {
@@ -1839,10 +1819,7 @@ public:
_actual_card_bm);

if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&final_update_cl,
worker_id,
_n_workers,
HeapRegion::FinalCountClaimValue);
_g1h->heap_region_par_iterate(&final_update_cl, worker_id, &_hrclaimer);
} else {
_g1h->heap_region_iterate(&final_update_cl);
}
@@ -1929,12 +1906,12 @@ protected:
size_t _max_live_bytes;
size_t _freed_bytes;
FreeRegionList* _cleanup_list;
HeapRegionClaimer _hrclaimer;

public:
G1ParNoteEndTask(G1CollectedHeap* g1h,
FreeRegionList* cleanup_list) :
AbstractGangTask("G1 note end"), _g1h(g1h),
_max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
AbstractGangTask("G1 note end"), _g1h(g1h), _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
}

void work(uint worker_id) {
double start = os::elapsedTime();
@@ -1943,9 +1920,7 @@ public:
G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
&hrrs_cleanup_task);
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
_g1h->workers()->active_workers(),
HeapRegion::NoteEndClaimValue);
_g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
} else {
_g1h->heap_region_iterate(&g1_note_end);
}
@@ -1991,16 +1966,16 @@ protected:
G1RemSet* _g1rs;
BitMap* _region_bm;
BitMap* _card_bm;
HeapRegionClaimer _hrclaimer;

public:
G1ParScrubRemSetTask(G1CollectedHeap* g1h,
BitMap* region_bm, BitMap* card_bm) :
AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()),
_region_bm(region_bm), _card_bm(card_bm) { }
G1ParScrubRemSetTask(G1CollectedHeap* g1h, BitMap* region_bm, BitMap* card_bm, uint n_workers) :
AbstractGangTask("G1 ScrubRS"), _g1rs(g1h->g1_rem_set()), _region_bm(region_bm), _card_bm(card_bm), _hrclaimer(n_workers) {
}

void work(uint worker_id) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1rs->scrub_par(_region_bm, _card_bm, worker_id,
HeapRegion::ScrubRemSetClaimValue);
_g1rs->scrub_par(_region_bm, _card_bm, worker_id, &_hrclaimer);
} else {
_g1rs->scrub(_region_bm, _card_bm);
}
@@ -2043,9 +2018,6 @@ void ConcurrentMark::cleanup() {
G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);

if (G1CollectedHeap::use_parallel_gc_threads()) {
assert(g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");

g1h->set_par_threads();
n_workers = g1h->n_par_threads();
assert(g1h->n_par_threads() == n_workers,
@@ -2053,9 +2025,6 @@ void ConcurrentMark::cleanup() {
g1h->workers()->run_task(&g1_par_count_task);
// Done with the parallel phase so reset to 0.
g1h->set_par_threads(0);

assert(g1h->check_heap_region_claim_values(HeapRegion::FinalCountClaimValue),
"sanity check");
} else {
n_workers = 1;
g1_par_count_task.work(0);
@@ -2080,9 +2049,6 @@ void ConcurrentMark::cleanup() {
g1h->workers()->run_task(&g1_par_verify_task);
// Done with the parallel phase so reset to 0.
g1h->set_par_threads(0);

assert(g1h->check_heap_region_claim_values(HeapRegion::VerifyCountClaimValue),
"sanity check");
} else {
g1_par_verify_task.work(0);
}
@@ -2108,14 +2074,11 @@ void ConcurrentMark::cleanup() {
g1h->reset_gc_time_stamp();

// Note end of marking in all heap regions.
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list);
G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
if (G1CollectedHeap::use_parallel_gc_threads()) {
g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_note_end_task);
g1h->set_par_threads(0);

assert(g1h->check_heap_region_claim_values(HeapRegion::NoteEndClaimValue),
"sanity check");
} else {
g1_par_note_end_task.work(0);
}
@@ -2132,15 +2095,11 @@ void ConcurrentMark::cleanup() {
// regions.
if (G1ScrubRemSets) {
double rs_scrub_start = os::elapsedTime();
G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm);
G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
if (G1CollectedHeap::use_parallel_gc_threads()) {
g1h->set_par_threads((int)n_workers);
g1h->workers()->run_task(&g1_par_scrub_rs_task);
g1h->set_par_threads(0);

assert(g1h->check_heap_region_claim_values(
HeapRegion::ScrubRemSetClaimValue),
"sanity check");
} else {
g1_par_scrub_rs_task.work(0);
}
@@ -3288,6 +3247,7 @@ protected:
BitMap* _cm_card_bm;
uint _max_worker_id;
int _active_workers;
HeapRegionClaimer _hrclaimer;

public:
G1AggregateCountDataTask(G1CollectedHeap* g1h,
@@ -3295,18 +3255,18 @@ public:
BitMap* cm_card_bm,
uint max_worker_id,
int n_workers) :
AbstractGangTask("Count Aggregation"),
_g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
_max_worker_id(max_worker_id),
_active_workers(n_workers) { }
AbstractGangTask("Count Aggregation"),
_g1h(g1h), _cm(cm), _cm_card_bm(cm_card_bm),
_max_worker_id(max_worker_id),
_active_workers(n_workers),
_hrclaimer(_active_workers) {
}

void work(uint worker_id) {
AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_worker_id);

if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&cl, worker_id,
_active_workers,
HeapRegion::AggregateCountClaimValue);
_g1h->heap_region_par_iterate(&cl, worker_id, &_hrclaimer);
} else {
_g1h->heap_region_iterate(&cl);
}
@@ -3323,15 +3283,9 @@ void ConcurrentMark::aggregate_count_data() {
_max_worker_id, n_workers);

if (G1CollectedHeap::use_parallel_gc_threads()) {
assert(_g1h->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");
_g1h->set_par_threads(n_workers);
_g1h->workers()->run_task(&g1_par_agg_task);
_g1h->set_par_threads(0);

assert(_g1h->check_heap_region_claim_values(HeapRegion::AggregateCountClaimValue),
"sanity check");
_g1h->reset_heap_region_claim_values();
} else {
g1_par_agg_task.work(0);
}

@@ -90,8 +90,8 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// Notes on implementation of parallelism in different tasks.
//
// G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate_chunked().
// G1ParVerifyTask uses heap_region_par_iterate() for parallelism.
// The number of GC workers is passed to heap_region_par_iterate().
// It does use run_task() which sets _n_workers in the task.
// G1ParTask executes g1_process_roots() ->
// SharedHeap::process_roots() which calls eventually to
@@ -1215,17 +1215,15 @@ public:

class ParRebuildRSTask: public AbstractGangTask {
G1CollectedHeap* _g1;
HeapRegionClaimer _hrclaimer;

public:
ParRebuildRSTask(G1CollectedHeap* g1)
: AbstractGangTask("ParRebuildRSTask"),
_g1(g1)
{ }
ParRebuildRSTask(G1CollectedHeap* g1) :
AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}

void work(uint worker_id) {
RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
_g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
_g1->workers()->active_workers(),
HeapRegion::RebuildRSClaimValue);
_g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
}
};

@@ -1455,8 +1453,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
set_par_threads(n_workers);

ParRebuildRSTask rebuild_rs_task(this);
assert(check_heap_region_claim_values(
HeapRegion::InitialClaimValue), "sanity check");
assert(UseDynamicNumberOfGCThreads ||
workers()->active_workers() == workers()->total_workers(),
"Unless dynamic should use total workers");
@@ -1466,9 +1462,6 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
set_par_threads(workers()->active_workers());
workers()->run_task(&rebuild_rs_task);
set_par_threads(0);
assert(check_heap_region_claim_values(
HeapRegion::RebuildRSClaimValue), "sanity check");
reset_heap_region_claim_values();
} else {
RebuildRSOutOfRegionClosure rebuild_rs(this);
heap_region_iterate(&rebuild_rs);
@@ -2343,6 +2336,7 @@ bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
case GCCause::_g1_humongous_allocation: return true;
case GCCause::_update_allocation_context_stats_inc: return true;
default: return false;
}
}
@@ -2633,111 +2627,12 @@ void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
}

void
G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
uint worker_id,
uint num_workers,
jint claim_value) const {
_hrm.par_iterate(cl, worker_id, num_workers, claim_value);
G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
uint worker_id,
HeapRegionClaimer *hrclaimer) const {
_hrm.par_iterate(cl, worker_id, hrclaimer);
}

class ResetClaimValuesClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion* r) {
r->set_claim_value(HeapRegion::InitialClaimValue);
return false;
}
};

void G1CollectedHeap::reset_heap_region_claim_values() {
ResetClaimValuesClosure blk;
heap_region_iterate(&blk);
}

void G1CollectedHeap::reset_cset_heap_region_claim_values() {
ResetClaimValuesClosure blk;
collection_set_iterate(&blk);
}

#ifdef ASSERT
// This checks whether all regions in the heap have the correct claim
// value. I also piggy-backed on this a check to ensure that the
// humongous_start_region() information on "continues humongous"
// regions is correct.

class CheckClaimValuesClosure : public HeapRegionClosure {
private:
jint _claim_value;
uint _failures;
HeapRegion* _sh_region;

public:
CheckClaimValuesClosure(jint claim_value) :
_claim_value(claim_value), _failures(0), _sh_region(NULL) { }
bool doHeapRegion(HeapRegion* r) {
if (r->claim_value() != _claim_value) {
gclog_or_tty->print_cr("Region " HR_FORMAT ", "
"claim value = %d, should be %d",
HR_FORMAT_PARAMS(r),
r->claim_value(), _claim_value);
++_failures;
}
if (!r->is_humongous()) {
_sh_region = NULL;
} else if (r->is_starts_humongous()) {
_sh_region = r;
} else if (r->is_continues_humongous()) {
if (r->humongous_start_region() != _sh_region) {
gclog_or_tty->print_cr("Region " HR_FORMAT ", "
"HS = "PTR_FORMAT", should be "PTR_FORMAT,
HR_FORMAT_PARAMS(r),
r->humongous_start_region(),
_sh_region);
++_failures;
}
}
return false;
}
uint failures() { return _failures; }
};

bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
CheckClaimValuesClosure cl(claim_value);
heap_region_iterate(&cl);
return cl.failures() == 0;
}

class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
private:
jint _claim_value;
uint _failures;

public:
CheckClaimValuesInCSetHRClosure(jint claim_value) :
_claim_value(claim_value), _failures(0) { }

uint failures() { return _failures; }

bool doHeapRegion(HeapRegion* hr) {
assert(hr->in_collection_set(), "how?");
assert(!hr->is_humongous(), "H-region in CSet");
if (hr->claim_value() != _claim_value) {
gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
"claim value = %d, should be %d",
HR_FORMAT_PARAMS(hr),
hr->claim_value(), _claim_value);
_failures += 1;
}
return false;
}
};

bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
CheckClaimValuesInCSetHRClosure cl(claim_value);
collection_set_iterate(&cl);
return cl.failures() == 0;
}
#endif // ASSERT

// Clear the cached CSet starting regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
void G1CollectedHeap::clear_cset_start_regions() {
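The G1 changes in this commit follow one theme: the per-region claim values (InitialClaimValue, FinalCountClaimValue, and friends, plus the reset/check helpers deleted above) are replaced by a HeapRegionClaimer object that each parallel task owns for a single iteration, so no cross-iteration reset pass or claim-value sanity checking is needed. A hedged C++ sketch of how such a claimer can work -- illustrative only, not the class this commit adds to the HeapRegionManager code:

#include <atomic>
#include <vector>

// One claim flag per region; workers race with CAS to claim regions.
// Because a fresh claimer is constructed per iteration, stale claims
// from a previous phase cannot leak into the next one.
class RegionClaimerSketch {
  std::vector<std::atomic<bool>> _claimed;
public:
  explicit RegionClaimerSketch(size_t n_regions) : _claimed(n_regions) {}
  // Returns true for exactly one caller per region.
  bool claim_region(size_t index) {
    bool expected = false;
    return _claimed[index].compare_exchange_strong(expected, true);
  }
};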
@ -3251,19 +3146,21 @@ public:
|
||||
|
||||
class G1ParVerifyTask: public AbstractGangTask {
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
VerifyOption _vo;
|
||||
bool _failures;
|
||||
G1CollectedHeap* _g1h;
|
||||
VerifyOption _vo;
|
||||
bool _failures;
|
||||
HeapRegionClaimer _hrclaimer;
|
||||
|
||||
public:
|
||||
// _vo == UsePrevMarking -> use "prev" marking information,
|
||||
// _vo == UseNextMarking -> use "next" marking information,
|
||||
// _vo == UseMarkWord -> use mark word from object header.
|
||||
G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
|
||||
AbstractGangTask("Parallel verify task"),
|
||||
_g1h(g1h),
|
||||
_vo(vo),
|
||||
_failures(false) { }
|
||||
AbstractGangTask("Parallel verify task"),
|
||||
_g1h(g1h),
|
||||
_vo(vo),
|
||||
_failures(false),
|
||||
_hrclaimer(g1h->workers()->active_workers()) {}
|
||||
|
||||
bool failures() {
|
||||
return _failures;
|
||||
@ -3272,9 +3169,7 @@ public:
|
||||
void work(uint worker_id) {
|
||||
HandleMark hm;
|
||||
VerifyRegionClosure blk(true, _vo);
|
||||
_g1h->heap_region_par_iterate_chunked(&blk, worker_id,
|
||||
_g1h->workers()->active_workers(),
|
||||
HeapRegion::ParVerifyClaimValue);
|
||||
_g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
|
||||
if (blk.failures()) {
|
||||
_failures = true;
|
||||
}
|
||||
@ -3316,8 +3211,6 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
|
||||
|
||||
if (!silent) { gclog_or_tty->print("HeapRegions "); }
|
||||
if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
|
||||
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
|
||||
"sanity check");
|
||||
|
||||
G1ParVerifyTask task(this, vo);
|
||||
assert(UseDynamicNumberOfGCThreads ||
|
||||
@ -3331,15 +3224,6 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
|
||||
failures = true;
|
||||
}
|
||||
|
||||
// Checks that the expected amount of parallel work was done.
|
||||
// The implication is that n_workers is > 0.
|
||||
assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
|
||||
"sanity check");
|
||||
|
||||
reset_heap_region_claim_values();
|
||||
|
||||
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
|
||||
"sanity check");
|
||||
} else {
|
||||
VerifyRegionClosure blk(false, vo);
|
||||
heap_region_iterate(&blk);
|
||||
@ -3926,8 +3810,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
|
||||
}
|
||||
|
||||
assert(check_young_list_well_formed(), "young list should be well formed");
|
||||
assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
|
||||
"sanity check");
|
||||
|
||||
// Don't dynamically change the number of GC threads this early. A value of
|
||||
// 0 is used to indicate serial work. When parallel work is done,
|
||||
@ -4288,26 +4170,12 @@ void G1CollectedHeap::finalize_for_evac_failure() {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::remove_self_forwarding_pointers() {
|
||||
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
|
||||
|
||||
double remove_self_forwards_start = os::elapsedTime();
|
||||
|
||||
set_par_threads();
|
||||
G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
|
||||
|
||||
if (G1CollectedHeap::use_parallel_gc_threads()) {
|
||||
set_par_threads();
|
||||
workers()->run_task(&rsfp_task);
|
||||
set_par_threads(0);
|
||||
} else {
|
||||
rsfp_task.work(0);
|
||||
}
|
||||
|
||||
assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
|
||||
|
||||
// Reset the claim values in the regions in the collection set.
|
||||
reset_cset_heap_region_claim_values();
|
||||
|
||||
assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
|
||||
workers()->run_task(&rsfp_task);
|
||||
set_par_threads(0);
|
||||
|
||||
// Now restore saved marks, if any.
|
||||
assert(_objs_with_preserved_marks.size() ==
|
||||
@ -5948,11 +5816,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
|
||||
|
||||
purge_code_root_memory();
|
||||
|
||||
if (g1_policy()->during_initial_mark_pause()) {
|
||||
// Reset the claim values set during marking the strong code roots
|
||||
reset_heap_region_claim_values();
|
||||
}
|
||||
|
||||
finalize_for_evac_failure();
|
||||
|
||||
if (evacuation_failed()) {
|
||||
|
||||
@ -211,6 +211,7 @@ class G1CollectedHeap : public SharedHeap {
friend class G1FreeHumongousRegionClosure;
// Other related classes.
friend class G1MarkSweep;
friend class HeapRegionClaimer;

private:
// The one and only G1CollectedHeap, so static functions can find it.

@ -1377,38 +1378,15 @@ public:

inline HeapWord* bottom_addr_for_region(uint index) const;

// Divide the heap region sequence into "chunks" of some size (the number
// of regions divided by the number of parallel threads times some
// overpartition factor, currently 4). Assumes that this will be called
// in parallel by ParallelGCThreads worker threads with distinct worker
// ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
// calls will use the same "claim_value", and that that claim value is
// different from the claim_value of any heap region before the start of
// the iteration. Applies "blk->doHeapRegion" to each of the regions, by
// attempting to claim the first region in each chunk, and, if
// successful, applying the closure to each region in the chunk (and
// setting the claim value of the second and subsequent regions of the
// chunk.) For now requires that "doHeapRegion" always returns "false",
// i.e., that a closure never attempt to abort a traversal.
void heap_region_par_iterate_chunked(HeapRegionClosure* cl,
uint worker_id,
uint num_workers,
jint claim_value) const;

// It resets all the region claim values to the default.
void reset_heap_region_claim_values();

// Resets the claim values of regions in the current
// collection set to the default.
void reset_cset_heap_region_claim_values();

#ifdef ASSERT
bool check_heap_region_claim_values(jint claim_value);

// Same as the routine above but only checks regions in the
// current collection set.
bool check_cset_heap_region_claim_values(jint claim_value);
#endif // ASSERT
// Iterate over the heap regions in parallel. Assumes that this will be called
// in parallel by ParallelGCThreads worker threads with distinct worker ids
// in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
// to each of the regions, by attempting to claim the region using the
// HeapRegionClaimer and, if successful, applying the closure to the claimed
// region.
void heap_region_par_iterate(HeapRegionClosure* cl,
uint worker_id,
HeapRegionClaimer* hrclaimer) const;

// Clear the cached cset start regions and (more importantly)
// the time stamps. Called when we reset the GC time stamp.
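
The iteration contract above is easier to see in isolation. Below is a minimal, self-contained C++ model of the claim-then-apply loop, not HotSpot code: std::atomic and std::thread stand in for Atomic::cmpxchg and the GC worker gang, and the names (RegionClaimer, claim_region, start_region_for_worker) mirror the ones introduced by this change while carrying none of the heap machinery.

// Standalone model of claimer-based parallel region iteration.
// Compile with: g++ -std=c++11 -pthread claimer_model.cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct RegionClaimer {
  unsigned n_workers;
  unsigned n_regions;
  std::vector<std::atomic<unsigned> > claims;   // 0 == Unclaimed, 1 == Claimed

  RegionClaimer(unsigned workers, unsigned regions)
      : n_workers(workers), n_regions(regions), claims(regions) {
    for (std::size_t i = 0; i < claims.size(); i++) claims[i].store(0);
  }
  // Stagger starting points so the workers do not all contend on region 0.
  unsigned start_region_for_worker(unsigned worker_id) const {
    return n_regions * worker_id / n_workers;
  }
  // Exactly one thread wins the 0 -> 1 transition for each region.
  bool claim_region(unsigned index) {
    unsigned expected = 0;
    return claims[index].compare_exchange_strong(expected, 1);
  }
};

int main() {
  const unsigned n_workers = 4, n_regions = 16;
  RegionClaimer claimer(n_workers, n_regions);
  std::vector<std::thread> gang;
  for (unsigned w = 0; w < n_workers; w++) {
    gang.emplace_back([&claimer, n_regions, w] {
      unsigned start = claimer.start_region_for_worker(w);
      // Every worker scans all regions, wrapping around the end,
      // but processes only the ones it claims.
      for (unsigned count = 0; count < n_regions; count++) {
        unsigned index = (start + count) % n_regions;
        if (claimer.claim_region(index)) {
          std::printf("worker %u processes region %u\n", w, index);
        }
      }
    });
  }
  for (std::size_t i = 0; i < gang.size(); i++) gang[i].join();
  return 0;
}

Each region is printed exactly once regardless of how the threads interleave, which is the property the real iteration relies on.
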
@ -1598,19 +1598,17 @@ class ParKnownGarbageTask: public AbstractGangTask {
CollectionSetChooser* _hrSorted;
uint _chunk_size;
G1CollectedHeap* _g1;
HeapRegionClaimer _hrclaimer;

public:
ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()) { }
ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
AbstractGangTask("ParKnownGarbageTask"),
_hrSorted(hrSorted), _chunk_size(chunk_size),
_g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}

void work(uint worker_id) {
ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size);

// Back to zero for the claim value.
_g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
_g1->workers()->active_workers(),
HeapRegion::InitialClaimValue);
_g1->heap_region_par_iterate(&parKnownGarbageCl, worker_id, &_hrclaimer);
}
};
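
Note the shape of the conversion, repeated in the other tasks in this commit: the claimer becomes a value member of the gang task, sized to the number of workers when the task is constructed, so its lifetime is bounded by one parallel operation and there is no longer any per-region claim state that has to be reset afterwards.
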
@ -1641,12 +1639,8 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
}
_collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser,
(int) WorkUnit);
ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
_g1->workers()->run_task(&parKnownGarbageTask);

assert(_g1->check_heap_region_claim_values(HeapRegion::InitialClaimValue),
"sanity check");
} else {
KnownGarbageClosure knownGarbagecl(_collectionSetChooser);
_g1->heap_region_iterate(&knownGarbagecl);

@ -177,16 +177,18 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
uint _worker_id;
HeapRegionClaimer* _hrclaimer;

DirtyCardQueue _dcq;
UpdateRSetDeferred _update_rset_cl;

public:
RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
uint worker_id) :
_g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
_worker_id(worker_id), _cm(_g1h->concurrent_mark()) {
}
uint worker_id,
HeapRegionClaimer* hrclaimer) :
_g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
_worker_id(worker_id), _cm(_g1h->concurrent_mark()), _hrclaimer(hrclaimer) {
}

bool doHeapRegion(HeapRegion *hr) {
bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();

@ -195,7 +197,7 @@ public:
assert(!hr->is_humongous(), "sanity");
assert(hr->in_collection_set(), "bad CS");

if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
if (_hrclaimer->claim_region(hr->hrm_index())) {
if (hr->evacuation_failed()) {
RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
during_initial_mark,

@ -233,14 +235,15 @@ public:
class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
HeapRegionClaimer _hrclaimer;

public:
G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
AbstractGangTask("G1 Remove Self-forwarding Pointers"),
_g1h(g1h) { }
AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
_hrclaimer(g1h->workers()->active_workers()) {}

void work(uint worker_id) {
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);

HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
_g1h->collection_set_iterate_from(hr, &rsfp_cl);

@ -425,13 +425,9 @@ void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
_g1->heap_region_iterate(&scrub_cl);
}

void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
uint worker_num, int claim_val) {
void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm, uint worker_num, HeapRegionClaimer *hrclaimer) {
ScrubRSClosure scrub_cl(region_bm, card_bm);
_g1->heap_region_par_iterate_chunked(&scrub_cl,
worker_num,
n_workers(),
claim_val);
_g1->heap_region_par_iterate(&scrub_cl, worker_num, hrclaimer);
}

G1TriggerClosure::G1TriggerClosure() :

@ -128,10 +128,10 @@ public:
void scrub(BitMap* region_bm, BitMap* card_bm);

// Like the above, but assumes it is called in parallel: "worker_num" is the
// parallel thread id of the current thread, and "claim_val" is the
// value that should be used to claim heap regions.
// parallel thread id of the current thread, and "hrclaimer" is the shared
// HeapRegionClaimer that should be used to claim heap regions.
void scrub_par(BitMap* region_bm, BitMap* card_bm,
uint worker_num, int claim_val);
uint worker_num, HeapRegionClaimer* hrclaimer);

// Refine the card corresponding to "card_ptr".
// If check_for_refs_into_cset is true, a true result is returned

@ -217,7 +217,6 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
} else {
hrrs->clear();
}
_claimed = InitialClaimValue;
}
zero_marked_bytes();

@ -294,17 +293,6 @@ void HeapRegion::clear_humongous() {
_humongous_start_region = NULL;
}

bool HeapRegion::claimHeapRegion(jint claimValue) {
jint current = _claimed;
if (current != claimValue) {
jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
if (res == current) {
return true;
}
}
return false;
}

HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :

@ -314,7 +302,7 @@ HeapRegion::HeapRegion(uint hrm_index,
_humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _prev(NULL),

@ -254,9 +254,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
HeapRegionSetBase* _containing_set;
#endif // ASSERT

// For parallel heapRegion traversal.
jint _claimed;

// We use concurrent marking to determine the amount of live data
// in each heap region.
size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.

@ -336,19 +333,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
// up once during initialization time.
static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);

enum ClaimValues {
InitialClaimValue = 0,
FinalCountClaimValue = 1,
NoteEndClaimValue = 2,
ScrubRemSetClaimValue = 3,
ParVerifyClaimValue = 4,
RebuildRSClaimValue = 5,
ParEvacFailureClaimValue = 6,
AggregateCountClaimValue = 7,
VerifyCountClaimValue = 8,
ParMarkRootClaimValue = 9
};

// All allocated blocks are occupied by objects in a HeapRegion
bool block_is_obj(const HeapWord* p) const;

@ -691,12 +675,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
return (HeapWord *) obj >= next_top_at_mark_start();
}

// For parallel heapRegion traversal.
bool claimHeapRegion(int claimValue);
jint claim_value() { return _claimed; }
// Use this carefully: only when you're sure no one is claiming...
void set_claim_value(int claimValue) { _claimed = claimValue; }

// Returns the "evacuation_failed" property of the region.
bool evacuation_failed() { return _evacuation_failed; }

@ -260,20 +260,17 @@ uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx)
return num_regions;
}

uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const {
return num_regions * worker_i / num_workers;
}

void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const {
const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length);
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
const uint start_index = hrclaimer->start_region_for_worker(worker_id);

// Every worker will actually look at all regions, skipping over regions that
// are currently not committed.
// This also (potentially) iterates over regions newly allocated during GC. This
// is no problem except for some extra work.
for (uint count = 0; count < _allocated_heapregions_length; count++) {
const uint index = (start_index + count) % _allocated_heapregions_length;
assert(0 <= index && index < _allocated_heapregions_length, "sanity");
const uint n_regions = hrclaimer->n_regions();
for (uint count = 0; count < n_regions; count++) {
const uint index = (start_index + count) % n_regions;
assert(0 <= index && index < n_regions, "sanity");
// Skip over unavailable regions
if (!is_available(index)) {
continue;

@ -282,11 +279,11 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
// We'll ignore "continues humongous" regions (we'll process them
// when we come across their corresponding "start humongous"
// region) and regions already claimed.
if (r->claim_value() == claim_value || r->is_continues_humongous()) {
if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
continue;
}
// OK, try to claim it
if (!r->claimHeapRegion(claim_value)) {
if (!hrclaimer->claim_region(index)) {
continue;
}
// Success!

@ -306,13 +303,11 @@ void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint
assert(chr->humongous_start_region() == r,
err_msg("Must work on humongous continuation of the original start region "
PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr)));
assert(chr->claim_value() != claim_value,
assert(!hrclaimer->is_region_claimed(ch_index),
"Must not have been claimed yet because claiming of humongous continuation first claims the start region");

bool claim_result = chr->claimHeapRegion(claim_value);
// We should always be able to claim it; no one else should
// be trying to claim this region.
guarantee(claim_result, "We should always be able to claim the is_continues_humongous part of the humongous object");
// There's no need to actually claim the continues humongous region, but we can do it in an assert as an extra precaution.
assert(hrclaimer->claim_region(ch_index), "We should always be able to claim the continuesHumongous part of the humongous object");

bool res2 = blk->doHeapRegion(chr);
if (res2) {

@ -445,3 +440,31 @@ void HeapRegionManager::verify_optional() {
}
#endif // PRODUCT

HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
_n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
assert(n_workers > 0, "Need at least one worker.");
_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
memset(_claims, Unclaimed, sizeof(*_claims) * _n_regions);
}
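
One subtlety worth noting: memset fills bytes, so initializing the claim array this way is only correct because Unclaimed is 0; a non-zero sentinel would need an explicit loop over the uint slots.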

HeapRegionClaimer::~HeapRegionClaimer() {
if (_claims != NULL) {
FREE_C_HEAP_ARRAY(uint, _claims, mtGC);
}
}

uint HeapRegionClaimer::start_region_for_worker(uint worker_id) const {
assert(worker_id < _n_workers, "Invalid worker_id.");
return _n_regions * worker_id / _n_workers;
}
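
For example, with _n_regions = 10 and _n_workers = 4, the workers start at regions 0, 2, 5 and 7, so the starting points stay evenly spread even when the division truncates.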

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
assert(region_index < _n_regions, "Invalid index.");
return _claims[region_index] == Claimed;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
assert(region_index < _n_regions, "Invalid index.");
uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
return old_val == Unclaimed;
}
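
Atomic::cmpxchg returns the value that was in the slot before the exchange, so exactly one competing thread observes Unclaimed and wins the region; every later caller sees Claimed and moves on.
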
@ -31,6 +31,7 @@

class HeapRegion;
class HeapRegionClosure;
class HeapRegionClaimer;
class FreeRegionList;

class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {

@ -66,6 +67,7 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {

class HeapRegionManager: public CHeapObj<mtGC> {
friend class VMStructs;
friend class HeapRegionClaimer;

G1HeapRegionTable _regions;

@ -99,9 +101,6 @@ class HeapRegionManager: public CHeapObj<mtGC> {

// Notify other data structures about change in the heap layout.
void update_committed_space(HeapWord* old_end, HeapWord* new_end);
// Calculate the starting region for each worker during parallel iteration so
// that they do not all start from the same region.
uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;

// Find a contiguous set of empty or uncommitted regions of length num and return
// the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.

@ -223,7 +222,7 @@ public:
// terminating the iteration early if doHeapRegion() returns true.
void iterate(HeapRegionClosure* blk) const;

void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const;
void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;

// Uncommit up to num_regions_to_remove regions that are completely free.
// Return the actual number of uncommitted regions.

@ -235,5 +234,33 @@ public:
void verify_optional() PRODUCT_RETURN;
};

// The HeapRegionClaimer is used during parallel iteration over heap regions,
// allowing workers to claim heap regions, gaining exclusive rights to these regions.
class HeapRegionClaimer : public StackObj {
uint _n_workers;
uint _n_regions;
uint* _claims;

static const uint Unclaimed = 0;
static const uint Claimed = 1;

public:
HeapRegionClaimer(uint n_workers);
~HeapRegionClaimer();

inline uint n_regions() const {
return _n_regions;
}

// Calculate the starting region for given worker so
// that they do not all start from the same region.
uint start_region_for_worker(uint worker_id) const;

// Check if region has been claimed with this HRClaimer.
bool is_region_claimed(uint region_index) const;

// Claim the given region, returns true if successfully claimed.
bool claim_region(uint region_index);
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
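
Taken together, the intended usage looks like the following sketch, assembled from the pieces in this commit; MyParTask and MyRegionClosure are hypothetical names, not part of the change, and this is not a complete HotSpot translation unit.

// Hypothetical gang task wiring a HeapRegionClaimer into the new
// heap_region_par_iterate() API; mirrors G1ParVerifyTask above.
class MyParTask : public AbstractGangTask {
  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;
public:
  MyParTask(G1CollectedHeap* g1h) :
    AbstractGangTask("MyParTask"),
    _g1h(g1h), _hrclaimer(g1h->workers()->active_workers()) {}
  void work(uint worker_id) {
    MyRegionClosure blk;   // hypothetical HeapRegionClosure subclass
    _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
  }
};
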
@ -95,8 +95,9 @@ void VM_G1IncCollectionPause::doit() {
assert(!_should_initiate_conc_mark ||
((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
(_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
_gc_cause == GCCause::_g1_humongous_allocation),
"only a GC locker, a System.gc() or a hum allocation induced GC should start a cycle");
_gc_cause == GCCause::_g1_humongous_allocation ||
_gc_cause == GCCause::_update_allocation_context_stats_inc),
"only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");

if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.

@ -54,7 +54,8 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _wb_young_gc:
return "WhiteBox Initiated Young GC";

case _update_allocation_context_stats:
case _update_allocation_context_stats_inc:
case _update_allocation_context_stats_full:
return "Update Allocation Context Stats";

case _no_gc:

@ -47,7 +47,8 @@ class GCCause : public AllStatic {
_heap_inspection,
_heap_dump,
_wb_young_gc,
_update_allocation_context_stats,
_update_allocation_context_stats_inc,
_update_allocation_context_stats_full,

/* implementation independent, but reserved for GC use */
_no_gc,

@ -1153,12 +1153,18 @@ void Compile::init_start(StartNode* s) {
assert(s == start(), "");
}

/**
* Return the 'StartNode'. We must not have a pending failure, since the ideal graph
* can be in an inconsistent state, i.e., we can get segmentation faults when traversing
* the ideal graph.
*/
StartNode* Compile::start() const {
assert(!failing(), "");
assert (!failing(), err_msg_res("Must not have pending failure. Reason is: %s", failure_reason()));
for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) {
Node* start = root()->fast_out(i);
if( start->is_Start() )
if (start->is_Start()) {
return start->as_Start();
}
}
fatal("Did not find Start node!");
return NULL;

@ -707,12 +707,15 @@ class Compile : public Phase {
void sort_expensive_nodes();

// Compilation environment.
Arena* comp_arena() { return &_comp_arena; }
ciEnv* env() const { return _env; }
CompileLog* log() const { return _log; }
bool failing() const { return _env->failing() || _failure_reason != NULL; }
const char* failure_reason() { return _failure_reason; }
bool failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }
Arena* comp_arena() { return &_comp_arena; }
ciEnv* env() const { return _env; }
CompileLog* log() const { return _log; }
bool failing() const { return _env->failing() || _failure_reason != NULL; }
const char* failure_reason() const { return (_env->failing()) ? _env->failure_reason() : _failure_reason; }

bool failure_reason_is(const char* r) const {
return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0);
}

void record_failure(const char* reason);
void record_method_not_compilable(const char* reason, bool all_tiers = false) {

@ -802,10 +802,16 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
// each arm of the Phi. If I know something clever about the exceptions
// I'm loading the class from, I can replace the LoadKlass with the
// klass constant for the exception oop.
if( ex_node->is_Phi() ) {
ex_klass_node = new PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
for( uint i = 1; i < ex_node->req(); i++ ) {
Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
if (ex_node->is_Phi()) {
ex_klass_node = new PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
for (uint i = 1; i < ex_node->req(); i++) {
Node* ex_in = ex_node->in(i);
if (ex_in == top() || ex_in == NULL) {
// This path was not taken.
ex_klass_node->init_req(i, top());
continue;
}
Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
ex_klass_node->init_req( i, k );
}
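
The guard added here matters because a Phi input that is top() (or NULL) marks a path that was never taken; building a klass address through such an input is what triggered the assert(adr_type != NULL) failure tracked as 8059299 and exercised by the new CatchInlineExceptions test below.
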
@ -1073,17 +1073,6 @@ JVM_ENTRY(jobjectArray, JVM_GetClassInterfaces(JNIEnv *env, jclass cls))
JVM_END


JVM_ENTRY(jobject, JVM_GetClassLoader(JNIEnv *env, jclass cls))
JVMWrapper("JVM_GetClassLoader");
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) {
return NULL;
}
Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls));
oop loader = k->class_loader();
return JNIHandles::make_local(env, loader);
JVM_END


JVM_QUICK_ENTRY(jboolean, JVM_IsInterface(JNIEnv *env, jclass cls))
JVMWrapper("JVM_IsInterface");
oop mirror = JNIHandles::resolve_non_null(cls);

@ -462,9 +462,6 @@ JVM_GetClassName(JNIEnv *env, jclass cls);
JNIEXPORT jobjectArray JNICALL
JVM_GetClassInterfaces(JNIEnv *env, jclass cls);

JNIEXPORT jobject JNICALL
JVM_GetClassLoader(JNIEnv *env, jclass cls);

JNIEXPORT jboolean JNICALL
JVM_IsInterface(JNIEnv *env, jclass cls);

@ -155,9 +155,6 @@ class PerfMemory : AllStatic {
}
}

// filename of backing store or NULL if none.
static char* backing_store_filename();

// returns the complete file path of hsperfdata.
// the caller is expected to free the allocated memory.
static char* get_perfdata_file_path();

@ -540,17 +540,25 @@ int NMethodSweeper::process_nmethod(nmethod *nm) {
// If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
if (nm->can_not_entrant_be_converted()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
// Clear ICStubs to prevent back patching stubs of zombie or unloaded
// nmethods during the next safepoint (see ICStub::finalize).
MutexLocker cl(CompiledIC_lock);
nm->clear_ic_stubs();
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
_zombified_count++;
SWEEP(nm);
{
MutexLocker cl(CompiledIC_lock);
nm->clear_ic_stubs();
}
// Acquiring the CompiledIC_lock may block for a safepoint and set the
// nmethod to zombie (see 'CodeCache::make_marked_nmethods_zombies').
// Check if nmethod is still non-entrant at this point.
if (nm->is_not_entrant()) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
}
// Code cache state change is tracked in make_zombie()
nm->make_zombie();
_zombified_count++;
SWEEP(nm);
}
assert(nm->is_zombie(), "nmethod must be zombie");
} else {
// Still alive, clean up its inline caches
MutexLocker cl(CompiledIC_lock);

@ -447,7 +447,7 @@ hotspot_compiler_3 = \
compiler/codegen/ \
compiler/cpuflags/RestoreMXCSR.java \
compiler/EscapeAnalysis/ \
compiler/exceptions/TestRecursiveReplacedException.java \
compiler/exceptions/ \
compiler/floatingpoint/ModNaN.java \
compiler/gcbarriers/G1CrashTest.java \
compiler/inlining/ \

@ -38,22 +38,26 @@ public class CheckSegmentedCodeCache {

private static void verifySegmentedCodeCache(ProcessBuilder pb, boolean enabled) throws Exception {
OutputAnalyzer out = new OutputAnalyzer(pb.start());
out.shouldHaveExitValue(0);
if (enabled) {
try {
// Non-nmethod code heap should be always available with the segmented code cache
out.shouldContain(NON_METHOD);
} catch (RuntimeException e) {
// TieredCompilation is disabled in a client VM
out.shouldContain("TieredCompilation is disabled in this release.");
// Check if TieredCompilation is disabled (in a client VM)
if(!out.getOutput().contains("TieredCompilation is disabled in this release.")) {
// Code cache is not segmented
throw new RuntimeException("No code cache segmentation.");
}
}
} else {
out.shouldNotContain(NON_METHOD);
}
out.shouldHaveExitValue(0);
}

private static void verifyCodeHeapNotExists(ProcessBuilder pb, String... heapNames) throws Exception {
OutputAnalyzer out = new OutputAnalyzer(pb.start());
out.shouldHaveExitValue(0);
for (String name : heapNames) {
out.shouldNotContain(name);
}

@ -86,6 +90,10 @@ public class CheckSegmentedCodeCache {
"-XX:ReservedCodeCacheSize=240m",
"-XX:+PrintCodeCache", "-version");
verifySegmentedCodeCache(pb, true);
pb = ProcessTools.createJavaProcessBuilder("-XX:+TieredCompilation",
"-XX:ReservedCodeCacheSize=400m",
"-XX:+PrintCodeCache", "-version");
verifySegmentedCodeCache(pb, true);

// Always enabled if SegmentedCodeCache is set
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",

@ -100,12 +108,13 @@ public class CheckSegmentedCodeCache {
"-Xint",
"-XX:+PrintCodeCache", "-version");
verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);

// If we stop compilation at CompLevel_none or CompLevel_simple we
// don't need a profiled code heap.
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:TieredStopAtLevel=0",
"-XX:+PrintCodeCache", "-version");
verifyCodeHeapNotExists(pb, PROFILED, NON_PROFILED);

// If we stop compilation at CompLevel_simple
verifyCodeHeapNotExists(pb, PROFILED);
pb = ProcessTools.createJavaProcessBuilder("-XX:+SegmentedCodeCache",
"-XX:TieredStopAtLevel=1",
"-XX:+PrintCodeCache", "-version");

hotspot/test/compiler/exceptions/CatchInlineExceptions.java (new file, 81 lines)
@ -0,0 +1,81 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/**
* @test
* @bug 8059299
* @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr
* @run main/othervm -Xbatch CatchInlineExceptions
*/

class Exception1 extends Exception {};
class Exception2 extends Exception {};

public class CatchInlineExceptions {
private static int counter0;
private static int counter1;
private static int counter2;
private static int counter;

static void foo(int i) throws Exception {
if ((i & 1023) == 2) {
counter0++;
throw new Exception2();
}
}

static void test(int i) throws Exception {
try {
foo(i);
}
catch (Exception e) {
if (e instanceof Exception1) {
counter1++;
} else if (e instanceof Exception2) {
counter2++;
}
counter++;
throw e;
}
}

public static void main(String[] args) throws Throwable {
for (int i = 0; i < 15000; i++) {
try {
test(i);
} catch (Exception e) {
// expected
}
}
if (counter1 != 0) {
throw new RuntimeException("Failed: counter1(" + counter1 + ") != 0");
}
if (counter2 != counter0) {
throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter0(" + counter0 + ")");
}
if (counter2 != counter) {
throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter(" + counter + ")");
}
System.out.println("TEST PASSED");
}
}