Mirror of https://github.com/openjdk/jdk.git (synced 2026-02-09 18:08:31 +00:00)

Merge
commit ea20cf16df
@ -12658,6 +12658,64 @@ instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask m
ins_pipe(ialu_reg_shift);
%}

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftI (AndI src mask) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 31 &&
(exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));

ins_cost(INSN_COST);
format %{ "ubfizw $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfizw(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
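// Illustrative example (not part of this change, constants invented for exposition):
// an int expression such as "(x & 0x7) << 2" becomes LShiftI(AndI src 0x7) 2 and is
// matched by the rule above; the mask 0x7 gives width = exact_log2(0x7 + 1) = 3, so
// the encoding would emit "ubfizw dst, src, 2, 3".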

// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
match(Set dst (LShiftL (AndL src mask) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 63 &&
(exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));

ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 31 &&
(exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}

// Rotations

instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)

@ -214,6 +214,48 @@ instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask m
ins_pipe(ialu_reg_shift);
%}

define(`UBFIZ_INSN',
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because imm$1_bitmask guarantees it.
`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
%{
match(Set dst (LShift$1 (And$1 src mask) lshift));
predicate((unsigned int)n->in(2)->get_int() <= $3 &&
(exact_log2$5(n->in(1)->in(2)->get_$4()+1) + (unsigned int)n->in(2)->get_int()) <= ($3+1));

ins_cost(INSN_COST);
format %{ "$2 $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ $2(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}')
UBFIZ_INSN(I, ubfizw, 31, int)
UBFIZ_INSN(L, ubfiz, 63, long, _long)
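// For reference: the two UBFIZ_INSN invocations above expand to the ubfizwI and
// ubfizL instruct definitions shown in the aarch64.ad hunk earlier in this change
// (the .ad sections are understood to be generated from this m4 source).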

// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
predicate((unsigned int)n->in(2)->get_int() <= 31 &&
(exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);

ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant;
long mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}

// Rotations

define(`EXTRACT_INSN',

@ -1748,8 +1748,10 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
{EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
#if defined(VM_LITTLE_ENDIAN)
{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
{EM_SH, EM_SH, ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},
#else
{EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
{EM_SH, EM_SH, ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
#endif
{EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
{EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
@ -1791,9 +1793,11 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
static Elf32_Half running_arch_code=EM_MIPS;
#elif (defined M68K)
static Elf32_Half running_arch_code=EM_68K;
#elif (defined SH)
static Elf32_Half running_arch_code=EM_SH;
#else
#error Method os::dll_load requires that one of following is defined:\
AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, __sparc
AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, SH, __sparc
#endif

// Identify compatability class for VM's architecture and library's architecture

@ -95,9 +95,21 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#define strasm_nobarrier ""
#define strasm_nobarrier_clobber_memory ""

inline jint Atomic::add (jint add_value, volatile jint* dest) {
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};
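// Sketch (assumption, not spelled out in this diff): AddAndFetch is read here as a
// CRTP mixin in shared code that adapts each platform's add_and_fetch() primitive to
// the generic Atomic::add() entry point, roughly along these lines:
//
//   template<typename Derived>
//   struct AddAndFetch {
//     template<typename I, typename D>
//     D operator()(I add_value, D volatile* dest) const {
//       // add_and_fetch() already returns the updated value, so it can be
//       // forwarded directly.
//       return static_cast<const Derived&>(*this).add_and_fetch(add_value, dest);
//     }
//   };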

unsigned int result;
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(4 == sizeof(I));
STATIC_CAST(4 == sizeof(D));

D result;

__asm__ __volatile__ (
strasm_lwsync
@ -110,13 +122,17 @@ inline jint Atomic::add (jint add_value, volatile jint* dest) {
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );

return (jint) result;
return result;
}


inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(8 == sizeof(I));
STATIC_CAST(8 == sizeof(D));

long result;
D result;

__asm__ __volatile__ (
strasm_lwsync
@ -129,11 +145,7 @@ inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );

return (intptr_t) result;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
return result;
}



@ -40,13 +40,25 @@ inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }


inline jint Atomic::add (jint add_value, volatile jint* dest) {
jint addend = add_value;
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D fetch_and_add(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
D old_value;
__asm__ volatile ( "lock xaddl %0,(%2)"
: "=r" (addend)
: "0" (addend), "r" (dest)
: "=r" (old_value)
: "0" (add_value), "r" (dest)
: "cc", "memory");
return addend + add_value;
return old_value;
}
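// Note (a reading of this change, not stated in the diff itself): "lock xaddl" leaves
// the previous value of *dest in the source register, so the primitive is naturally a
// fetch-and-add; the FetchAndAdd wrapper is presumably what adds add_value back when a
// caller such as Atomic::add() wants the updated value.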

inline void Atomic::inc (volatile jint* dest) {
@ -111,17 +123,17 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
intptr_t addend = add_value;
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
D old_value;
__asm__ __volatile__ ( "lock xaddq %0,(%2)"
: "=r" (addend)
: "0" (addend), "r" (dest)
: "=r" (old_value)
: "0" (add_value), "r" (dest)
: "cc", "memory");
return addend + add_value;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
return old_value;
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@ -164,15 +176,6 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
}


inline void Atomic::inc_ptr(volatile intptr_t* dest) {
inc((volatile jint*)dest);
}

@ -74,7 +74,7 @@ static inline int m68k_compare_and_swap(int newval,
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
for (;;) {
// Loop until success.

@ -135,7 +135,7 @@ static inline int arm_compare_and_swap(int newval,
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.

@ -173,32 +173,38 @@ inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
*dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(4 == sizeof(I));
STATIC_CAST(4 == sizeof(D));

#ifdef ARM
return arm_add_and_fetch(dest, add_value);
return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
#else
#ifdef M68K
return m68k_add_and_fetch(dest, add_value);
return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
#else
return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
return m68k_add_and_fetch(dest, add_value);
#else
return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(8 == sizeof(I));
STATIC_CAST(8 == sizeof(D));

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
return __sync_add_and_fetch(dest, add_value);
}

inline void Atomic::inc(volatile jint* dest) {

@ -47,10 +47,15 @@ inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }


inline jint Atomic::add(jint add_value, volatile jint* dest)
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
return __sync_add_and_fetch(dest, add_value);
}
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const {
return __sync_add_and_fetch(dest, add_value);
}
};

inline void Atomic::inc(volatile jint* dest)
{
@ -105,16 +110,6 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
{
return __sync_add_and_fetch(dest, add_value);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
{
return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest)
{
add_ptr(1, dest);

@ -91,9 +91,21 @@ inline void Atomic::store (jlong value, jlong* dest) {
//
// For ARMv7 we add explicit barriers in the stubs.

inline jint Atomic::add(jint add_value, volatile jint* dest) {
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
#ifdef AARCH64
jint val;
D val;
int tmp;
__asm__ volatile(
"1:\n\t"
@ -106,7 +118,7 @@ inline jint Atomic::add(jint add_value, volatile jint* dest) {
: "memory");
return val;
#else
return (*os::atomic_add_func)(add_value, dest);
return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
#endif
}

@ -118,9 +130,13 @@ inline void Atomic::dec(volatile jint* dest) {
Atomic::add(-1, (volatile jint *)dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef AARCH64
intptr_t val;
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
D val;
int tmp;
__asm__ volatile(
"1:\n\t"
@ -132,14 +148,8 @@ inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
: [add_val] "r" (add_value), [dest] "r" (dest)
: "memory");
return val;
#else
return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}
#endif // AARCH64

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
Atomic::add_ptr(1, dest);

@ -93,9 +93,21 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#define strasm_nobarrier ""
#define strasm_nobarrier_clobber_memory ""

inline jint Atomic::add (jint add_value, volatile jint* dest) {
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

unsigned int result;
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(4 == sizeof(I));
STATIC_CAST(4 == sizeof(D));

D result;

__asm__ __volatile__ (
strasm_lwsync
@ -108,13 +120,17 @@ inline jint Atomic::add (jint add_value, volatile jint* dest) {
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );

return (jint) result;
return result;
}


inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(8 == sizeof(I));
STATIC_CAST(8 == sizeof(D));

long result;
D result;

__asm__ __volatile__ (
strasm_lwsync
@ -127,11 +143,7 @@ inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
: /*%1*/"r" (add_value), /*%2*/"r" (dest)
: "cc", "memory" );

return (intptr_t) result;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
return result;
}


@ -82,8 +82,21 @@ inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *
// The return value of the method is the value that was successfully stored. At the
// time the caller receives back control, the value in memory may have changed already.

inline jint Atomic::add(jint inc, volatile jint*dest) {
unsigned int old, upd;
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(4 == sizeof(I));
STATIC_CAST(4 == sizeof(D));

D old, upd;

if (VM_Version::has_LoadAndALUAtomicV1()) {
__asm__ __volatile__ (
@ -124,12 +137,17 @@ inline jint Atomic::add(jint inc, volatile jint*dest) {
);
}

return (jint)upd;
return upd;
}


inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
unsigned long old, upd;
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(8 == sizeof(I));
STATIC_CAST(8 == sizeof(D));

D old, upd;

if (VM_Version::has_LoadAndALUAtomicV1()) {
__asm__ __volatile__ (
@ -170,11 +188,7 @@ inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
);
}

return (intptr_t)upd;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
return upd;
}



@ -51,8 +51,21 @@ inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest);

inline jlong Atomic::load(const volatile jlong* src) { return *src; }

inline jint Atomic::add (jint add_value, volatile jint* dest) {
intptr_t rv;
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(4 == sizeof(I));
STATIC_CAST(4 == sizeof(D));

D rv;
__asm__ volatile(
"1: \n\t"
" ld [%2], %%o2\n\t"
@ -68,8 +81,12 @@ inline jint Atomic::add (jint add_value, volatile jint* dest) {
return rv;
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
intptr_t rv;
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(8 == sizeof(I));
STATIC_CAST(8 == sizeof(D));

D rv;
__asm__ volatile(
"1: \n\t"
" ldx [%2], %%o2\n\t"
@ -85,10 +102,6 @@ inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return rv;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
}


inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
intptr_t rv = exchange_value;

@ -40,13 +40,25 @@ inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }


inline jint Atomic::add (jint add_value, volatile jint* dest) {
jint addend = add_value;
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D fetch_and_add(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
D old_value;
__asm__ volatile ( "lock xaddl %0,(%2)"
: "=r" (addend)
: "0" (addend), "r" (dest)
: "=r" (old_value)
: "0" (add_value), "r" (dest)
: "cc", "memory");
return addend + add_value;
return old_value;
}

inline void Atomic::inc (volatile jint* dest) {
@ -111,17 +123,17 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
intptr_t addend = add_value;
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
D old_value;
__asm__ __volatile__ ("lock xaddq %0,(%2)"
: "=r" (addend)
: "0" (addend), "r" (dest)
: "=r" (old_value)
: "0" (add_value), "r" (dest)
: "cc", "memory");
return addend + add_value;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
return old_value;
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@ -164,15 +176,6 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
}


inline void Atomic::inc_ptr(volatile intptr_t* dest) {
inc((volatile jint*)dest);
}

@ -74,7 +74,7 @@ static inline int m68k_compare_and_swap(int newval,
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
for (;;) {
// Loop until success.

@ -135,7 +135,7 @@ static inline int arm_compare_and_swap(int newval,
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
for (;;) {
// Loop until a __kernel_cmpxchg succeeds.

@ -167,32 +167,38 @@ inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
*dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(4 == sizeof(I));
STATIC_CAST(4 == sizeof(D));

#ifdef ARM
return arm_add_and_fetch(dest, add_value);
return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
#else
#ifdef M68K
return m68k_add_and_fetch(dest, add_value);
return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
#else
return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
return m68k_add_and_fetch(dest, add_value);
#else
return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_CAST(8 == sizeof(I));
STATIC_CAST(8 == sizeof(D));

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
return __sync_add_and_fetch(dest, add_value);
}

inline void Atomic::inc(volatile jint* dest) {

@ -62,22 +62,21 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }
extern "C" jint _Atomic_swap32(jint exchange_value, volatile jint* dest);
extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);

extern "C" jint _Atomic_add32(jint inc, volatile jint* dest);
extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);


inline jint Atomic::add (jint add_value, volatile jint* dest) {
return _Atomic_add32(add_value, dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return _Atomic_add64(add_value, dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
}

// Implement ADD using a CAS loop.
template<size_t byte_size>
struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
template<typename I, typename D>
inline D operator()(I add_value, D volatile* dest) const {
D old_value = *dest;
while (true) {
D new_value = old_value + add_value;
D result = cmpxchg(new_value, dest, old_value);
if (result == old_value) break;
old_value = result;
}
return old_value + add_value;
}
};
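// Worked example (illustrative only): with *dest == 5 and add_value == 3, the loop
// computes new_value = 8 and tries cmpxchg(8, dest, 5). If another thread updated
// *dest first, cmpxchg returns the value it observed, old_value is refreshed and the
// loop retries; on success the operator returns old_value + add_value == 8, giving
// the add-and-fetch semantics the caller expects.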

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_swap32(exchange_value, dest);

@ -90,58 +90,6 @@
.nonvolatile
.end

// Support for jint Atomic::add(jint add_value, volatile jint* dest).
//
// Arguments:
// add_value: O0 (e.g., +1 or -1)
// dest: O1
//
// Results:
// O0: the new value stored in dest
//
// Overwrites O3

.inline _Atomic_add32, 2
.volatile
2:
ld [%o1], %o2
add %o0, %o2, %o3
cas [%o1], %o2, %o3
cmp %o2, %o3
bne 2b
nop
add %o0, %o2, %o0
.nonvolatile
.end


// Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
//
// 64-bit
//
// Arguments:
// add_value: O0 (e.g., +1 or -1)
// dest: O1
//
// Results:
// O0: the new value stored in dest
//
// Overwrites O3

.inline _Atomic_add64, 2
.volatile
3:
ldx [%o1], %o2
add %o0, %o2, %o3
casx [%o1], %o2, %o3
cmp %o2, %o3
bne %xcc, 3b
nop
add %o0, %o2, %o0
.nonvolatile
.end


// Support for void Prefetch::read(void *loc, intx interval)
//
// Prefetch for several reads.

@ -51,6 +51,8 @@ inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest);

extern "C" {
jint _Atomic_add(jint add_value, volatile jint* dest);
jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);

jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
jbyte compare_value);
@ -60,8 +62,34 @@ extern "C" {
jlong compare_value);
}

inline jint Atomic::add (jint add_value, volatile jint* dest) {
return _Atomic_add(add_value, dest);
template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

// Not using add_using_helper; see comment for cmpxchg.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
return PrimitiveConversions::cast<D>(
_Atomic_add(PrimitiveConversions::cast<jint>(add_value),
reinterpret_cast<jint volatile*>(dest)));
}

// Not using add_using_helper; see comment for cmpxchg.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_ASSERT(8 == sizeof(I));
STATIC_ASSERT(8 == sizeof(D));
return PrimitiveConversions::cast<D>(
_Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
reinterpret_cast<jlong volatile*>(dest)));
}

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
@ -115,17 +143,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,

inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
}

@ -57,20 +57,28 @@ inline void Atomic::store (jint store_value, volatile jint* dest) { *
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }

template<size_t byte_size>
struct Atomic::PlatformAdd
: Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) const;
};

#ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }

inline jint Atomic::add (jint add_value, volatile jint* dest) {
return (jint)(*os::atomic_add_func)(add_value, dest);
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
}

inline void Atomic::inc (volatile jint* dest) {
@ -130,7 +138,11 @@ inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#else // !AMD64

inline jint Atomic::add (jint add_value, volatile jint* dest) {
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
STATIC_ASSERT(4 == sizeof(I));
STATIC_ASSERT(4 == sizeof(D));
__asm {
mov edx, dest;
mov eax, add_value;
@ -140,14 +152,6 @@ inline jint Atomic::add (jint add_value, volatile jint* dest) {
}
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return (intptr_t)add((jint)add_value, (volatile jint*)dest);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add((jint)add_value, (volatile jint*)dest);
}

inline void Atomic::inc (volatile jint* dest) {
// alternative for InterlockedIncrement
__asm {

@ -77,9 +77,8 @@
#include "services/classLoadingService.hpp"
#include "services/diagnosticCommand.hpp"
#include "services/threadService.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/macros.hpp"
#include "utilities/ticks.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
#include "classfile/systemDictionaryShared.hpp"
@ -87,9 +86,6 @@
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif
#if INCLUDE_TRACE
#include "trace/tracing.hpp"
#endif

PlaceholderTable* SystemDictionary::_placeholders = NULL;
Dictionary* SystemDictionary::_shared_dictionary = NULL;
@ -615,17 +611,17 @@ InstanceKlass* SystemDictionary::handle_parallel_super_load(
return NULL;
}

static void post_class_load_event(const Ticks& start_time,
InstanceKlass* k,
static void post_class_load_event(EventClassLoad* event,
const InstanceKlass* k,
const ClassLoaderData* init_cld) {
#if INCLUDE_TRACE
EventClassLoad event(UNTIMED);
if (event.should_commit()) {
event.set_starttime(start_time);
event.set_loadedClass(k);
event.set_definingClassLoader(k->class_loader_data());
event.set_initiatingClassLoader(init_cld);
event.commit();
assert(event != NULL, "invariant");
assert(k != NULL, "invariant");
if (event->should_commit()) {
event->set_loadedClass(k);
event->set_definingClassLoader(k->class_loader_data());
event->set_initiatingClassLoader(init_cld);
event->commit();
}
#endif // INCLUDE_TRACE
}
@ -653,7 +649,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
assert(name != NULL && !FieldType::is_array(name) &&
!FieldType::is_obj(name), "invalid class name");

Ticks class_load_start_time = Ticks::now();
EventClassLoad class_load_start_event;

HandleMark hm(THREAD);

@ -899,7 +895,7 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
return NULL;
}

post_class_load_event(class_load_start_time, k, loader_data);
post_class_load_event(&class_load_start_event, k, loader_data);

#ifdef ASSERT
{
@ -1006,7 +1002,7 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
GrowableArray<Handle>* cp_patches,
TRAPS) {

Ticks class_load_start_time = Ticks::now();
EventClassLoad class_load_start_event;

ClassLoaderData* loader_data;
if (host_klass != NULL) {
@ -1064,7 +1060,7 @@ InstanceKlass* SystemDictionary::parse_stream(Symbol* class_name,
JvmtiExport::post_class_load((JavaThread *) THREAD, k);
}

post_class_load_event(class_load_start_time, k, loader_data);
post_class_load_event(&class_load_start_event, k, loader_data);
}
assert(host_klass != NULL || NULL == cp_patches,
"cp_patches only found with host_klass");

@ -720,44 +720,49 @@ JavaThread* CompileBroker::make_thread(const char* name, CompileQueue* queue, Co
// At this point it may be possible that no osthread was created for the
// JavaThread due to lack of memory. We would have to throw an exception
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails.
// exceptions anyway, check and abort if this fails. But first release the
// lock.

if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}
if (thread != NULL && thread->osthread() != NULL) {

java_lang_Thread::set_thread(thread_oop(), thread);
java_lang_Thread::set_thread(thread_oop(), thread);

// Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities.
// The os-priority is set in the CompilerThread startup code itself
// Note that this only sets the JavaThread _priority field, which by
// definition is limited to Java priorities and not OS priorities.
// The os-priority is set in the CompilerThread startup code itself

java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);

// Note that we cannot call os::set_priority because it expects Java
// priorities and we are *explicitly* using OS priorities so that it's
// possible to set the compiler thread priority higher than any Java
// thread.
// Note that we cannot call os::set_priority because it expects Java
// priorities and we are *explicitly* using OS priorities so that it's
// possible to set the compiler thread priority higher than any Java
// thread.

int native_prio = CompilerThreadPriority;
if (native_prio == -1) {
if (UseCriticalCompilerThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
int native_prio = CompilerThreadPriority;
if (native_prio == -1) {
if (UseCriticalCompilerThreadPriority) {
native_prio = os::java_to_os_priority[CriticalPriority];
} else {
native_prio = os::java_to_os_priority[NearMaxPriority];
}
}
}
os::set_native_priority(thread, native_prio);
os::set_native_priority(thread, native_prio);

java_lang_Thread::set_daemon(thread_oop());
java_lang_Thread::set_daemon(thread_oop());

thread->set_threadObj(thread_oop());
if (compiler_thread) {
thread->as_CompilerThread()->set_compiler(comp);
thread->set_threadObj(thread_oop());
if (compiler_thread) {
thread->as_CompilerThread()->set_compiler(comp);
}
Threads::add(thread);
Thread::start(thread);
}
Threads::add(thread);
Thread::start(thread);
}

// First release lock before aborting VM.
if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
os::native_thread_creation_failed_msg());
}

// Let go of Threads_lock before yielding

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -409,7 +409,7 @@ public:

virtual void work(uint worker_id) {
while (true) {
size_t to_process = Atomic::add(1, &_cur_chunk) - 1;
size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
if (to_process >= _num_chunks) {
break;
}

@ -200,7 +200,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::allocate_new_chunk() {
return NULL;
}

size_t cur_idx = Atomic::add(1, &_hwm) - 1;
size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
if (cur_idx >= _chunk_capacity) {
return NULL;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,7 @@ jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
return card_ptr;
}
// Otherwise, the card is hot.
size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
size_t masked_index = index & (_hot_cache_size - 1);
jbyte* current_ptr = _hot_cache[masked_index];


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,7 @@ class G1HotCardCache: public CHeapObj<mtGC> {

size_t _hot_cache_size;

int _hot_cache_par_chunk_size;
size_t _hot_cache_par_chunk_size;

// Avoids false sharing when concurrently updating _hot_cache_idx or
// _hot_cache_par_claimed_idx. These are never updated at the same time

@ -243,7 +243,7 @@ public:

bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
if (marked_as_dirty) {
size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
_dirty_region_buffer[allocated] = region;
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,6 +30,7 @@
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "runtime/handles.inline.hpp"
@ -111,6 +112,9 @@ bool VM_GC_Operation::doit_prologue() {

void VM_GC_Operation::doit_epilogue() {
assert(Thread::current()->is_Java_thread(), "just checking");
// Clean up old interpreter OopMap entries that were replaced
// during the GC thread root traversal.
OopMapCache::cleanup_old_entries();
if (Universe::has_reference_pending_list()) {
Heap_lock->notify_all();
}

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
@ -37,6 +38,9 @@ class OopMapCacheEntry: private InterpreterOopMap {
friend class OopMapCache;
friend class VerifyClosure;

private:
OopMapCacheEntry* _next;

protected:
// Initialization
void fill(const methodHandle& method, int bci);
@ -54,8 +58,9 @@ class OopMapCacheEntry: private InterpreterOopMap {

public:
OopMapCacheEntry() : InterpreterOopMap() {
_next = NULL;
#ifdef ASSERT
_resource_allocate_bit_mask = false;
_resource_allocate_bit_mask = false;
#endif
}
};
@ -263,23 +268,26 @@ bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, in

// Check if map is generated correctly
// (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);
Log(interpreter, oopmap) logv;
LogStream st(logv.trace());

st.print("Locals (%d): ", max_locals);
for(int i = 0; i < max_locals; i++) {
bool v1 = is_oop(i) ? true : false;
bool v2 = vars[i].is_reference() ? true : false;
assert(v1 == v2, "locals oop mask generation error");
if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
st.print("%d", v1 ? 1 : 0);
}
st.cr();

if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
st.print("Stack (%d): ", stack_top);
for(int j = 0; j < stack_top; j++) {
bool v1 = is_oop(max_locals + j) ? true : false;
bool v2 = stack[j].is_reference() ? true : false;
assert(v1 == v2, "stack oop mask generation error");
if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
st.print("%d", v1 ? 1 : 0);
}
if (TraceOopMapGeneration && Verbose) tty->cr();
st.cr();
return true;
}

@ -373,8 +381,6 @@ void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int s

// verify bit mask
assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");


}

void OopMapCacheEntry::flush() {
@ -385,16 +391,6 @@ void OopMapCacheEntry::flush() {

// Implementation of OopMapCache

#ifndef PRODUCT

static long _total_memory_usage = 0;

long OopMapCache::memory_usage() {
return _total_memory_usage;
}

#endif

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
assert(_resource_allocate_bit_mask,
"Should not resource allocate the _bit_mask");
@ -435,15 +431,11 @@ inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int
^ ((unsigned int) method->size_of_parameters() << 6);
}

OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;

OopMapCache::OopMapCache() :
_mut(Mutex::leaf, "An OopMapCache lock", true)
{
_array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
// Cannot call flush for initialization, since flush
// will check if memory should be deallocated
for(int i = 0; i < _size; i++) _array[i].initialize();
NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
OopMapCache::OopMapCache() {
_array = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
for(int i = 0; i < _size; i++) _array[i] = NULL;
}


@ -452,114 +444,154 @@ OopMapCache::~OopMapCache() {
// Deallocate oop maps that are allocated out-of-line
flush();
// Deallocate array
NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
return &_array[i % _size];
return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
}

void OopMapCache::flush() {
for (int i = 0; i < _size; i++) _array[i].flush();
for (int i = 0; i < _size; i++) {
OopMapCacheEntry* entry = _array[i];
if (entry != NULL) {
_array[i] = NULL; // no barrier, only called in OopMapCache destructor
entry->flush();
FREE_C_HEAP_OBJ(entry);
}
}
}

void OopMapCache::flush_obsolete_entries() {
for (int i = 0; i < _size; i++)
if (!_array[i].is_empty() && _array[i].method()->is_old()) {
assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
for (int i = 0; i < _size; i++) {
OopMapCacheEntry* entry = _array[i];
if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
// Cache entry is occupied by an old redefined method and we don't want
// to pin it down so flush the entry.
if (log_is_enabled(Debug, redefine, class, oopmap)) {
ResourceMark rm;
log_debug(redefine, class, oopmap)
log_debug(redefine, class, interpreter, oopmap)
("flush: %s(%s): cached entry @%d",
_array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
}
_array[i].flush();
_array[i] = NULL;
entry->flush();
FREE_C_HEAP_OBJ(entry);
}
}
}

// Called by GC for thread root scan during a safepoint only. The other interpreted frame oopmaps
// are generated locally and not cached.
void OopMapCache::lookup(const methodHandle& method,
int bci,
InterpreterOopMap* entry_for) const {
MutexLocker x(&_mut);

OopMapCacheEntry* entry = NULL;
InterpreterOopMap* entry_for) {
assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
int probe = hash_value_for(method, bci);
int i;
OopMapCacheEntry* entry = NULL;

if (log_is_enabled(Debug, interpreter, oopmap)) {
static int count = 0;
ResourceMark rm;
log_debug(interpreter, oopmap)
("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
method()->name_and_sig_as_C_string(), probe);
}

// Search hashtable for match
int i;
for(i = 0; i < _probe_depth; i++) {
entry = entry_at(probe + i);
if (entry->match(method, bci)) {
if (entry != NULL && !entry->is_empty() && entry->match(method, bci)) {
entry_for->resource_copy(entry);
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
return;
}
}

if (TraceOopMapGeneration) {
static int count = 0;
ResourceMark rm;
tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
method->print_value(); tty->cr();
}

// Entry is not in hashtable.
// Compute entry and return it
// Compute entry

OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
tmp->initialize();
tmp->fill(method, bci);
entry_for->resource_copy(tmp);

if (method->should_not_be_cached()) {
// It is either not safe or not a good idea to cache this Method*
// at this time. We give the caller of lookup() a copy of the
// interesting info via parameter entry_for, but we don't add it to
// the cache. See the gory details in Method*.cpp.
compute_one_oop_map(method, bci, entry_for);
FREE_C_HEAP_OBJ(tmp);
return;
}

// First search for an empty slot
for(i = 0; i < _probe_depth; i++) {
entry = entry_at(probe + i);
if (entry->is_empty()) {
entry->fill(method, bci);
entry_for->resource_copy(entry);
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
return;
entry = entry_at(probe + i);
if (entry == NULL) {
if (put_at(probe + i, tmp, NULL)) {
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
return;
}
}
}

if (TraceOopMapGeneration) {
ResourceMark rm;
tty->print_cr("*** collision in oopmap cache - flushing item ***");
}
log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

// No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
//entry_at(probe + _probe_depth - 1)->flush();
//for(i = _probe_depth - 1; i > 0; i--) {
// // Coping entry[i] = entry[i-1];
// OopMapCacheEntry *to = entry_at(probe + i);
// OopMapCacheEntry *from = entry_at(probe + i - 1);
// to->copy(from);
// }

assert(method->is_method(), "gaga");

entry = entry_at(probe + 0);
entry->fill(method, bci);

// Copy the newly cached entry to input parameter
entry_for->resource_copy(entry);

if (TraceOopMapGeneration) {
ResourceMark rm;
tty->print("Done with ");
method->print_value(); tty->cr();
// where the first entry in the collision array is replaced with the new one.
OopMapCacheEntry* old = entry_at(probe + 0);
if (put_at(probe + 0, tmp, old)) {
enqueue_for_cleanup(old);
} else {
enqueue_for_cleanup(tmp);
}
assert(!entry_for->is_empty(), "A non-empty oop map should be returned");

assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
return;
}

void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
bool success = false;
OopMapCacheEntry* head;
do {
head = _old_entries;
entry->_next = head;
success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
} while (!success);

if (log_is_enabled(Debug, interpreter, oopmap)) {
ResourceMark rm;
log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
entry->method()->name_and_sig_as_C_string(), entry->bci());
}
}
|
||||
|
||||
// This is called after GC threads are done and nothing is accessing the old_entries
|
||||
// list, so no synchronization needed.
|
||||
void OopMapCache::cleanup_old_entries() {
|
||||
OopMapCacheEntry* entry = _old_entries;
|
||||
_old_entries = NULL;
|
||||
while (entry != NULL) {
|
||||
if (log_is_enabled(Debug, interpreter, oopmap)) {
|
||||
ResourceMark rm;
|
||||
log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
|
||||
entry->method()->name_and_sig_as_C_string(), entry->bci());
|
||||
}
|
||||
OopMapCacheEntry* next = entry->_next;
|
||||
entry->flush();
|
||||
FREE_C_HEAP_OBJ(entry);
|
||||
entry = next;
|
||||
}
|
||||
}
|
||||
|
||||
void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
|
||||
// Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
|
||||
OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass);
|
||||
|
||||
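
Note on the new lookup(): it no longer takes _mut; entries are published into the table with a compare-and-swap and displaced or losing entries go through enqueue_for_cleanup() so a concurrent GC reader is never left holding freed memory. The body of put_at() is not shown in this hunk, so the sketch below is an assumed implementation based on the put_at()/entry_at() declarations added to oopMapCache.hpp further down, not the patch's actual code.

// Sketch only (assumed body, not from this patch): install 'entry' into the
// slot if it still holds 'old'; when the CAS fails, another thread won the
// race and the caller hands its own entry to enqueue_for_cleanup() instead of
// freeing it immediately.
bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg_ptr(entry, &_array[i % _size], old) == old;
}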
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -144,17 +144,19 @@ class InterpreterOopMap: ResourceObj {
};

class OopMapCache : public CHeapObj<mtClass> {
static OopMapCacheEntry* volatile _old_entries;
private:
enum { _size = 32, // Use fixed size for now
_probe_depth = 3 // probe depth in case of collisions
};

OopMapCacheEntry* _array;
OopMapCacheEntry* volatile * _array;

unsigned int hash_value_for(const methodHandle& method, int bci) const;
OopMapCacheEntry* entry_at(int i) const;
bool put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old);

mutable Mutex _mut;
static void enqueue_for_cleanup(OopMapCacheEntry* entry);

void flush();

@ -167,13 +169,11 @@ class OopMapCache : public CHeapObj<mtClass> {

// Returns the oopMap for (method, bci) in parameter "entry".
// Returns false if an oop map was not found.
void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry) const;
void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry);

// Compute an oop map without updating the cache or grabbing any locks (for debugging)
static void compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry);

// Returns total no. of bytes allocated as part of OopMapCache's
static long memory_usage() PRODUCT_RETURN0;
static void cleanup_old_entries();
};

#endif // SHARE_VM_INTERPRETER_OOPMAPCACHE_HPP

@ -74,6 +74,7 @@
LOG_TAG(iklass) \
LOG_TAG(init) \
LOG_TAG(inlining) \
LOG_TAG(interpreter) \
LOG_TAG(itables) \
LOG_TAG(jit) \
LOG_TAG(jni) \

@ -1613,6 +1613,8 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
tty->print_cr("Dumping objects to open archive heap region ...");
_open_archive_heap_regions = new GrowableArray<MemRegion>(2);
MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);

MetaspaceShared::destroy_archive_object_cache();
}

G1HeapVerifier::verify_archive_regions();

@ -29,6 +29,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"
@ -96,11 +97,16 @@ class MetaspaceShared : AllStatic {
return p1 == p2;
}
static unsigned obj_hash(oop const& p) {
unsigned hash = (unsigned)((uintptr_t)&p);
return hash ^ (hash >> LogMinObjAlignment);
assert(!p->mark()->has_bias_pattern(),
"this object should never have been locked"); // so identity_hash won't safepoint
unsigned hash = (unsigned)p->identity_hash();
return hash;
}
typedef ResourceHashtable<oop, oop,
MetaspaceShared::obj_hash, MetaspaceShared::obj_equals> ArchivedObjectCache;
MetaspaceShared::obj_hash,
MetaspaceShared::obj_equals,
15889, // prime number
ResourceObj::C_HEAP> ArchivedObjectCache;
static ArchivedObjectCache* _archive_object_cache;

public:
@ -115,7 +121,10 @@ class MetaspaceShared : AllStatic {
NOT_CDS_JAVA_HEAP(return false;)
}
static void create_archive_object_cache() {
CDS_JAVA_HEAP_ONLY(_archive_object_cache = new ArchivedObjectCache(););
CDS_JAVA_HEAP_ONLY(_archive_object_cache = new (ResourceObj::C_HEAP, mtClass)ArchivedObjectCache(););
}
static void destroy_archive_object_cache() {
CDS_JAVA_HEAP_ONLY(delete _archive_object_cache; _archive_object_cache = NULL;);
}
static void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;

@ -214,26 +214,14 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla
}

void Method::mask_for(int bci, InterpreterOopMap* mask) {

Thread* myThread = Thread::current();
methodHandle h_this(myThread, this);
#if defined(ASSERT) && !INCLUDE_JVMCI
bool has_capability = myThread->is_VM_thread() ||
myThread->is_ConcurrentGC_thread() ||
myThread->is_GC_task_thread();

if (!has_capability) {
if (!VerifyStack && !VerifyLastFrame) {
// verify stack calls this outside VM thread
warning("oopmap should only be accessed by the "
"VM, GC task or CMS threads (or during debugging)");
InterpreterOopMap local_mask;
method_holder()->mask_for(h_this, bci, &local_mask);
local_mask.print();
}
methodHandle h_this(Thread::current(), this);
// Only GC uses the OopMapCache during thread stack root scanning
// any other uses generate an oopmap but do not save it in the cache.
if (Universe::heap()->is_gc_active()) {
method_holder()->mask_for(h_this, bci, mask);
} else {
OopMapCache::compute_one_oop_map(h_this, bci, mask);
}
#endif
method_holder()->mask_for(h_this, bci, mask);
return;
}

@ -219,7 +219,7 @@ void Symbol::increment_refcount() {

void Symbol::decrement_refcount() {
if (_refcount >= 0) { // not a permanent symbol
jshort new_value = Atomic::add(-1, &_refcount);
short new_value = Atomic::add(short(-1), &_refcount);
#ifdef ASSERT
if (new_value == -1) { // we have transitioned from 0 -> -1
print();
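
Why the short(-1) cast is now needed: the templated Atomic::add introduced in atomic.hpp below only matches when the increment's type is no wider than the destination and has the same signedness, so a plain int literal no longer binds to a jshort refcount. A small illustration follows (variable names are hypothetical, not patch code, and the 2-byte path additionally expects the ATOMIC_SHORT_PAIR layout that Symbol::_refcount uses).

// Illustration only: under the new AddImpl constraints (IsIntegral, equal
// signedness, sizeof(I) <= sizeof(D)), the commented-out call no longer
// matches because I = int is wider than D = short.
volatile short refcount = 5;
// Atomic::add(-1, &refcount);      // rejected: sizeof(int) > sizeof(short)
Atomic::add(short(-1), &refcount);  // OK: I == D == short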
@ -283,7 +283,7 @@ bool C2Compiler::is_intrinsic_supported(const methodHandle& method, bool is_virt
case vmIntrinsics::_weakCompareAndSetIntAcquire:
case vmIntrinsics::_weakCompareAndSetIntRelease:
case vmIntrinsics::_weakCompareAndSetInt:
if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapL)) return false;
if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapI)) return false;
break;

/* CompareAndSet, Byte: */

@ -26,11 +26,14 @@
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

@ -82,11 +85,17 @@ class Atomic : AllStatic {

// Atomically add to a location. Returns updated value. add*() provide:
// <fence> add-value-to-dest <membar StoreLoad|StoreStore>
inline static jshort add (jshort add_value, volatile jshort* dest);
inline static jint add (jint add_value, volatile jint* dest);
inline static size_t add (size_t add_value, volatile size_t* dest);
inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
inline static void* add_ptr(intptr_t add_value, volatile void* dest);

template<typename I, typename D>
inline static D add(I add_value, D volatile* dest);

inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return add(add_value, dest);
}

inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
return add(add_value, reinterpret_cast<char* volatile*>(dest));
}

// Atomically increment location. inc*() provide:
// <fence> increment-dest <membar StoreLoad|StoreStore>
@ -156,6 +165,74 @@ private:
// that is needed here.
template<typename From, typename To> struct IsPointerConvertible;

// Dispatch handler for add. Provides type-based validity checking
// and limited conversions around calls to the platform-specific
// implementation layer provided by PlatformAdd.
template<typename I, typename D, typename Enable = void>
struct AddImpl;

// Platform-specific implementation of add. Support for sizes of 4
// bytes and (if different) pointer size bytes are required. The
// class is a function object that must be default constructable,
// with these requirements:
//
// - dest is of type D*, an integral or pointer type.
// - add_value is of type I, an integral type.
// - sizeof(I) == sizeof(D).
// - if D is an integral type, I == D.
// - platform_add is an object of type PlatformAdd<sizeof(D)>.
//
// Then
// platform_add(add_value, dest)
// must be a valid expression, returning a result convertible to D.
//
// No definition is provided; all platforms must explicitly define
// this class and any needed specializations.
template<size_t byte_size> struct PlatformAdd;

// Helper base classes for defining PlatformAdd. To use, define
// PlatformAdd or a specialization that derives from one of these,
// and include in the PlatformAdd definition the support function
// (described below) required by the base class.
//
// These classes implement the required function object protocol for
// PlatformAdd, using a support function template provided by the
// derived class. Let add_value (of type I) and dest (of type D) be
// the arguments the object is called with. If D is a pointer type
// P*, then let addend (of type I) be add_value * sizeof(P);
// otherwise, addend is add_value.
//
// FetchAndAdd requires the derived class to provide
// fetch_and_add(addend, dest)
// atomically adding addend to the value of dest, and returning the
// old value.
//
// AddAndFetch requires the derived class to provide
// add_and_fetch(addend, dest)
// atomically adding addend to the value of dest, and returning the
// new value.
//
// When D is a pointer type P*, both fetch_and_add and add_and_fetch
// treat it as if it were a uintptr_t; they do not perform any
// scaling of the addend, as that has already been done by the
// caller.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
template<typename Derived> struct FetchAndAdd;
template<typename Derived> struct AddAndFetch;
private:

// Support for platforms that implement some variants of add using a
// (typically out of line) non-template helper function. The
// generic arguments passed to PlatformAdd need to be translated to
// the appropriate type for the helper function, the helper function
// invoked on the translated arguments, and the result translated
// back. Type is the parameter / return type of the helper
// function. No scaling of add_value is performed when D is a pointer
// type, so this function can be used to implement the support function
// required by AddAndFetch.
template<typename Type, typename Fn, typename I, typename D>
static D add_using_helper(Fn fn, I add_value, D volatile* dest);

// Dispatch handler for cmpxchg. Provides type-based validity
// checking and limited conversions around calls to the
// platform-specific implementation layer provided by
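
For context, a hedged sketch of what a platform's PlatformAdd specialization might look like when built on the FetchAndAdd helper described above. This is not part of the patch; the GCC __sync_fetch_and_add builtin stands in for the real per-platform assembly, and the exact spelling of a real platform file may differ.

// Hypothetical platform layer, illustration only. FetchAndAdd converts the
// old value returned by fetch_and_add() into the new value that Atomic::add()
// promises to return; pointer scaling has already been done by the helper.
template<>
struct Atomic::PlatformAdd<4>
  : Atomic::FetchAndAdd<Atomic::PlatformAdd<4> >
{
  template<typename I, typename D>
  D fetch_and_add(I add_value, D volatile* dest) const {
    return __sync_fetch_and_add(dest, add_value);  // stand-in for real platform code
  }
};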
@ -219,6 +296,22 @@ struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
template<typename I, typename D>
D operator()(I add_value, D volatile* dest) const;
};

template<typename Derived>
struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
template<typename I, typename D>
D operator()(I add_value, D volatile* dest) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template are provided, nor are there any generic
@ -255,8 +348,93 @@ struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest) {
return AddImpl<I, D>()(add_value, dest);
}

template<typename I, typename D>
struct Atomic::AddImpl<
I, D,
typename EnableIf<IsIntegral<I>::value &&
IsIntegral<D>::value &&
(sizeof(I) <= sizeof(D)) &&
(IsSigned<I>::value == IsSigned<D>::value)>::type>
VALUE_OBJ_CLASS_SPEC
{
D operator()(I add_value, D volatile* dest) const {
D addend = add_value;
return PlatformAdd<sizeof(D)>()(addend, dest);
}
};

template<typename I, typename P>
struct Atomic::AddImpl<
I, P*,
typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
VALUE_OBJ_CLASS_SPEC
{
P* operator()(I add_value, P* volatile* dest) const {
STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
typedef typename Conditional<IsSigned<I>::value,
intptr_t,
uintptr_t>::type CI;
CI addend = add_value;
return PlatformAdd<sizeof(P*)>()(addend, dest);
}
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
jshort operator()(jshort add_value, jshort volatile* dest) const {
#ifdef VM_LITTLE_ENDIAN
assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
return (jshort)(new_value >> 16); // preserves sign
}
};
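
A standalone arithmetic check of the trick described in the comment above (plain C++, not HotSpot code): adding add_value << 16 to the containing 32-bit word changes only the high half, and shifting the result back down preserves the short's sign.

#include <cassert>
#include <cstdint>

// Demonstration only: the 2-byte counter occupies the high 16 bits of an
// aligned 32-bit word; the low half holds unrelated data.
int main() {
  uint32_t word = 0xFFFEu << 16 | 0x1234u;   // high half holds -2, low half is other data
  int16_t delta = -1;
  word += (uint32_t)(uint16_t)delta << 16;   // the "atomic add of (add_value << 16)"
  assert((int16_t)(word >> 16) == -3);       // counter updated, sign preserved
  assert((word & 0xFFFFu) == 0x1234u);       // neighbouring half untouched
  return 0;
}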
template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
I addend = add_value;
// If D is a pointer type P*, scale by sizeof(P).
if (IsPointer<D>::value) {
addend *= sizeof(typename RemovePointer<D>::type);
}
D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
// If D is a pointer type P*, scale by sizeof(P).
if (IsPointer<D>::value) {
add_value *= sizeof(typename RemovePointer<D>::type);
}
return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
return PrimitiveConversions::cast<D>(
fn(PrimitiveConversions::cast<Type>(add_value),
reinterpret_cast<Type volatile*>(dest)));
}

inline void Atomic::inc(volatile size_t* dest) {
@ -413,32 +591,12 @@ inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int*
return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
return (jshort)(new_value >> 16); // preserves sign
}

inline void Atomic::inc(volatile jshort* dest) {
(void)add(1, dest);
(void)add(jshort(1), dest);
}

inline void Atomic::dec(volatile jshort* dest) {
(void)add(-1, dest);
(void)add(jshort(-1), dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP

@ -1373,6 +1373,30 @@ void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map,

}

#if INCLUDE_JVMCI
address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
// there is no exception handler for this pc => deoptimize
cm->make_not_entrant();

// Use Deoptimization::deoptimize for all of its side-effects:
// revoking biases of monitors, gathering traps statistics, logging...
// it also patches the return pc but we do not care about that
// since we return a continuation to the deopt_blob below.
JavaThread* thread = JavaThread::current();
RegisterMap reg_map(thread, UseBiasedLocking);
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(&reg_map);
assert(caller_frame.cb()->as_nmethod_or_null() == cm, "expect top frame nmethod");
Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);

MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
if (trap_mdo != NULL) {
trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
}

return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}
#endif

void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),

@ -136,6 +136,10 @@ class Deoptimization : AllStatic {
static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason);

#if INCLUDE_JVMCI
static address deoptimize_for_missing_exception_handler(CompiledMethod* cm);
#endif

private:
// Does the actual work for deoptimizing a single frame
static void deoptimize_single_frame(JavaThread* thread, frame fr, DeoptReason reason);

@ -715,9 +715,6 @@ public:
product(bool, PrintVMQWaitTime, false, \
"Print out the waiting time in VM operation queue") \
\
develop(bool, TraceOopMapGeneration, false, \
"Show OopMapGeneration") \
\
product(bool, MethodFlushing, true, \
"Reclamation of zombie and not-entrant methods") \
\

@ -129,7 +129,7 @@ void MemProfiler::do_trace() {
fprintf(_log_fp, UINTX_FORMAT_W(6) "," UINTX_FORMAT_W(6) ",%6ld\n",
handles_memory_usage / K,
resource_memory_usage / K,
OopMapCache::memory_usage() / K);
0L);
fflush(_log_fp);
}

@ -638,20 +638,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address
if (t != NULL) {
return cm->code_begin() + t->pco();
} else {
// there is no exception handler for this pc => deoptimize
cm->make_not_entrant();

// Use Deoptimization::deoptimize for all of its side-effects:
// revoking biases of monitors, gathering traps statistics, logging...
// it also patches the return pc but we do not care about that
// since we return a continuation to the deopt_blob below.
JavaThread* thread = JavaThread::current();
RegisterMap reg_map(thread, UseBiasedLocking);
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(&reg_map);
Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);

return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
return Deoptimization::deoptimize_for_missing_exception_handler(cm);
}
}
#endif // INCLUDE_JVMCI

@ -1407,7 +1407,6 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
assert(inf->header()->is_neutral(), "invariant");
assert(inf->object() == object, "invariant");
assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
return inf;
}

@ -396,14 +396,7 @@ StackValueCollection* interpretedVFrame::expressions() const {
StackValueCollection* interpretedVFrame::stack_data(bool expressions) const {

InterpreterOopMap oop_mask;
// oopmap for current bci
if ((TraceDeoptimization && Verbose) JVMCI_ONLY( || PrintDeoptimizationDetails)) {
methodHandle m_h(Thread::current(), method());
OopMapCache::compute_one_oop_map(m_h, bci(), &oop_mask);
} else {
method()->mask_for(bci(), &oop_mask);
}

method()->mask_for(bci(), &oop_mask);
const int mask_len = oop_mask.number_of_entries();

// If the method is native, method()->max_locals() is not telling the truth.

@ -53,7 +53,7 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {
}

inline void allocate(size_t sz) {
Atomic::add(1, &_count);
Atomic::inc(&_count);
if (sz > 0) {
Atomic::add(sz, &_size);
DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size));
@ -64,7 +64,7 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {
inline void deallocate(size_t sz) {
assert(_count > 0, "Nothing allocated yet");
assert(_size >= sz, "deallocation > allocated");
Atomic::add(-1, &_count);
Atomic::dec(&_count);
if (sz > 0) {
// unary minus operator applied to unsigned type, result still unsigned
#pragma warning(suppress: 4146)
@ -74,7 +74,7 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {

inline void resize(long sz) {
if (sz != 0) {
Atomic::add(sz, &_size);
Atomic::add(size_t(sz), &_size);
DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
}
}
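
Note on resize(): the size_t(sz) cast is required because the templated Atomic::add rejects mixed signedness, and it is safe because unsigned arithmetic wraps modulo 2^N, so adding the converted value of a negative sz still subtracts its magnitude. A standalone check (plain C++, not HotSpot code):

#include <cassert>
#include <cstddef>

// Demonstration only: converting a negative long to size_t and adding it is
// equivalent, modulo 2^N, to subtracting its magnitude.
int main() {
  size_t size = 100;
  long sz = -40;
  size += (size_t)sz;   // wraps: 100 + (2^N - 40) == 60 (mod 2^N)
  assert(size == 60);
  return 0;
}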
@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include <stddef.h>

#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

enum {
CONTENT_TYPE_NONE = 0,
@ -54,10 +55,11 @@ enum ReservedEvent {
NUM_RESERVED_EVENTS = JVM_CONTENT_TYPES_END
};

typedef enum ReservedEvent ReservedEvent;

typedef u8 traceid;

class ClassLoaderData;
class Klass;
class Method;
class ModuleEntry;
class PackageEntry;
class Symbol;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#ifndef SHARE_VM_TRACE_TRACEEVENT_HPP
#define SHARE_VM_TRACE_TRACEEVENT_HPP

#include "trace/traceTime.hpp"
#include "utilities/macros.hpp"

enum EventStartTime {
@ -34,25 +35,18 @@ enum EventStartTime {

#if INCLUDE_TRACE
#include "trace/traceBackend.hpp"
#include "trace/tracing.hpp"
#include "tracefiles/traceEventIds.hpp"
#include "tracefiles/traceTypes.hpp"
#include "utilities/ticks.hpp"

template<typename T>
class TraceEvent : public StackObj {
class TraceEvent {
private:
bool _started;
#ifdef ASSERT
bool _committed;
bool _cancelled;
protected:
bool _ignore_check;
#endif

protected:
jlong _startTime;
jlong _endTime;
DEBUG_ONLY(bool _committed;)

void set_starttime(const TracingTime& time) {
_startTime = time;
@ -67,10 +61,7 @@ class TraceEvent : public StackObj {
_endTime(0),
_started(false)
#ifdef ASSERT
,
_committed(false),
_cancelled(false),
_ignore_check(false)
, _committed(false)
#endif
{
if (T::is_enabled()) {
@ -100,10 +91,9 @@ class TraceEvent : public StackObj {

void commit() {
if (!should_commit()) {
DEBUG_ONLY(cancel());
return;
}
assert(!_cancelled, "Committing an event that has already been cancelled");
assert(!_committed, "event already committed");
if (_startTime == 0) {
static_cast<T*>(this)->set_starttime(Tracing::time());
} else if (_endTime == 0) {
@ -111,8 +101,8 @@ class TraceEvent : public StackObj {
}
if (static_cast<T*>(this)->should_write()) {
static_cast<T*>(this)->writeEvent();
DEBUG_ONLY(_committed = true;)
}
DEBUG_ONLY(set_commited());
}

static TraceEventId id() {
@ -134,32 +124,6 @@ class TraceEvent : public StackObj {
static bool has_stacktrace() {
return T::hasStackTrace;
}

void cancel() {
assert(!_committed && !_cancelled,
"event was already committed/cancelled");
DEBUG_ONLY(_cancelled = true);
}

~TraceEvent() {
if (_started) {
assert(_ignore_check || _committed || _cancelled,
"event was not committed/cancelled");
}
}

#ifdef ASSERT
protected:
void ignoreCheck() {
_ignore_check = true;
}

private:
void set_commited() {
assert(!_committed, "event has already been committed");
_committed = true;
}
#endif // ASSERT
};

#endif // INCLUDE_TRACE

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

This code is free software; you can redistribute it and/or modify it
@ -37,10 +37,10 @@
// INCLUDE_TRACE

#include "tracefiles/traceTypes.hpp"
#include "trace/traceEvent.hpp"
#include "utilities/macros.hpp"
#include "utilities/ticks.hpp"

#if INCLUDE_TRACE
#include "trace/traceEvent.hpp"
#include "trace/traceStream.hpp"
#include "utilities/ostream.hpp"

@ -57,7 +57,6 @@ public:
bool should_commit() const { return false; }
static bool is_enabled() { return false; }
void commit() {}
void cancel() {}
};

<xsl:apply-templates select="trace/events/struct" mode="empty"/>

@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

This code is free software; you can redistribute it and/or modify it
@ -32,10 +32,7 @@
#ifndef TRACEFILES_TRACETYPES_HPP
#define TRACEFILES_TRACETYPES_HPP

#include "oops/symbol.hpp"
#include "trace/traceDataTypes.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ticks.hpp"

enum JVMContentType {
_not_a_content_type = (JVM_CONTENT_TYPES_START - 1),

@ -30,21 +30,21 @@
* @modules jdk.internal.vm.ci/jdk.vm.ci.runtime
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=true
* -XX:+EnableJVMCI -Djvmci.Compiler=null
* -XX:+EnableJVMCI
* compiler.jvmci.JVM_GetJVMCIRuntimeTest
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=false
* -XX:-EnableJVMCI
* -XX:-EnableJVMCI -XX:-UseJVMCICompiler
* compiler.jvmci.JVM_GetJVMCIRuntimeTest
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=true
* -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.threaded=true
* -XX:+EnableJVMCI -Djvmci.Compiler=null
* -XX:+EnableJVMCI
* compiler.jvmci.JVM_GetJVMCIRuntimeTest
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.positive=false
* -Dcompiler.jvmci.JVM_GetJVMCIRuntimeTest.threaded=true
* -XX:-EnableJVMCI
* -XX:-EnableJVMCI -XX:-UseJVMCICompiler
* compiler.jvmci.JVM_GetJVMCIRuntimeTest

*/

@ -43,11 +43,11 @@
* compiler.jvmci.SecurityRestrictionsTest
* ALL_PERM
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -XX:+EnableJVMCI
* -XX:+EnableJVMCI -XX:-UseJVMCICompiler
* compiler.jvmci.SecurityRestrictionsTest
* NO_JVMCI_ACCESS_PERM
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -XX:-EnableJVMCI
* -XX:-EnableJVMCI -XX:-UseJVMCICompiler
* compiler.jvmci.SecurityRestrictionsTest
* NO_JVMCI
*/

@ -61,7 +61,6 @@ public class DisassembleCodeBlobTest {
= new DisassembleCodeBlobTest();
List<CompileCodeTestCase> testCases
= CompileCodeTestCase.generate(/* bci = */ -1);
testCases.addAll(CompileCodeTestCase.generate(/* bci = */ 0));
testCases.forEach(test::check);
testCases.stream().findAny().ifPresent(test::checkZero);
test.checkNull();

@ -39,7 +39,7 @@
* @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
* -XX:-BackgroundCompilation -Djvmci.Compiler=null
* -XX:-BackgroundCompilation
* compiler.jvmci.compilerToVM.HasCompiledCodeForOSRTest
*/

@ -42,7 +42,6 @@
* @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
* -Djvmci.Compiler=null
* compiler.jvmci.compilerToVM.InvalidateInstalledCodeTest
*/

@ -24,7 +24,7 @@
/*
* @test
* @bug 8136421
* @requires vm.jvmci
* @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4)
* @library / /test/lib
* ../common/patches
* @modules java.base/jdk.internal.misc
@ -38,7 +38,6 @@
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
* -XX:+WhiteBoxAPI -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI -Xbatch
* -Djvmci.Compiler=null
* compiler.jvmci.compilerToVM.IsMatureVsReprofileTest
*/

@ -35,9 +35,8 @@
* compiler.jvmci.compilerToVM.JVM_RegisterJVMCINatives
* @run main/othervm -XX:+UnlockExperimentalVMOptions
* -Dcompiler.jvmci.compilerToVM.JVM_RegisterJVMCINatives.positive=false
* -XX:-EnableJVMCI
* -XX:-EnableJVMCI -XX:-UseJVMCICompiler
* compiler.jvmci.compilerToVM.JVM_RegisterJVMCINatives

*/

package compiler.jvmci.compilerToVM;

@ -49,7 +49,6 @@
* -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=true
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=false
* -Djvmci.Compiler=null
* compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
* @run main/othervm -Xmixed -Xbatch -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
@ -61,7 +60,6 @@
* -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=false
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=false
* -Djvmci.Compiler=null
* compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
* @run main/othervm -Xmixed -Xbatch -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
@ -73,7 +71,6 @@
* -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=true
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=true
* -Djvmci.Compiler=null
* compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
* @run main/othervm -Xmixed -Xbatch -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
@ -85,7 +82,6 @@
* -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=false
* -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=true
* -Djvmci.Compiler=null
* compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest
*/

@ -24,7 +24,7 @@
/**
* @test
* @bug 8136421
* @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 3)
* @requires vm.jvmci & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4)
* @library /test/lib /
* @library ../common/patches
* @modules java.base/jdk.internal.misc
@ -40,7 +40,7 @@
* @run main/othervm -Xbootclasspath/a:.
* -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
* -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI
* -Xmixed -Xbatch -Djvmci.Compiler=null
* -Xmixed -Xbatch
* compiler.jvmci.compilerToVM.ReprofileTest
*/

@ -74,7 +74,7 @@ public class JvmciShutdownEventTest {
"Unexpected exit code with -EnableJVMCI",
"Unexpected output with -EnableJVMCI", ExitCode.OK,
addTestVMOptions, "-XX:+UnlockExperimentalVMOptions",
"-XX:-EnableJVMCI", "-Xbootclasspath/a:.",
"-XX:-EnableJVMCI", "-XX:-UseJVMCICompiler", "-Xbootclasspath/a:.",
JvmciShutdownEventListener.class.getName()
);
}