Erik Trimble 2010-05-27 12:42:44 -07:00
commit 18d0b0100c
48 changed files with 549 additions and 389 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright 2007 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2007-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,9 +78,8 @@ public class CMSBitMap extends VMObject {
}
public BitMap bm() {
BitMap bitMap = new BitMap((int) (bmWordSize() >> (shifter() + 3) ));
BitMap bitMap = new BitMap((int) (bmWordSize() >> shifter() ));
VirtualSpace vs = virtualSpace();
//bitMap.set_size((int)vs.committedSize());
bitMap.set_map(vs.low());
return bitMap;
}
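Aside: the fix above drops an extra +3 from the shift, which appears to have made the mirrored bitmap an eighth of its intended size. A quick standalone check of the arithmetic (the values are illustrative, not taken from the VM):

#include <cstdio>

int main() {
  long bmWordSize = 1L << 20;   // illustrative bitmap size
  long shifter    = 3;
  printf("old size = %ld\n", bmWordSize >> (shifter + 3));  // 16384, 8x too small
  printf("new size = %ld\n", bmWordSize >> shifter);        // 131072
  return 0;
}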

View File

@ -1,5 +1,5 @@
/*
* Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -326,7 +326,13 @@ public class JavaThread extends Thread {
/** Gets the Java-side thread object for this JavaThread */
public Oop getThreadObj() {
return VM.getVM().getObjectHeap().newOop(threadObjField.getValue(addr));
Oop obj = null;
try {
obj = VM.getVM().getObjectHeap().newOop(threadObjField.getValue(addr));
} catch (Exception e) {
e.printStackTrace();
}
return obj;
}
/** Get the Java-side name of this thread */

View File

@ -33,14 +33,8 @@ Obj_Files += solaris_x86_64.o
#
ifeq ("${Platform_compiler}", "sparcWorks")
# Temporary until C++ compiler is fixed
# _lwp_create_interpose must have a frame
OPT_CFLAGS/os_solaris_x86_64.o = -xO1
# Temporary until SS10 C++ compiler is fixed
OPT_CFLAGS/generateOptoStub.o = -xO2
OPT_CFLAGS/thread.o = -xO2
else

View File

@ -36,15 +36,15 @@ OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
ifeq ("${Platform_compiler}", "sparcWorks")
OPT_CFLAGS/SLOWER = -xO2
# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
ifeq ($(COMPILER_REV_NUMERIC), 509)
# To avoid jvm98 crash
OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER)
# Not clear this workaround could be skipped in some cases.
OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER)
OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER)
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER)
endif
# To avoid jvm98 crash
OPT_CFLAGS/instanceKlass.o = $(OPT_CFLAGS/SLOWER)
endif # COMPILER_NUMERIC_REV == 509
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
# dtrace cannot handle tail call optimization (6672627, 6693876)
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT)
endif # COMPILER_NUMERIC_REV >= 509
ifeq ($(COMPILER_REV_NUMERIC), 505)
# CC 5.5 has bug 4908364 with -xO4 (Fixed in 5.6)

View File

@ -32,25 +32,6 @@ Obj_Files += solaris_x86_32.o
#
# Special case flags for compilers and compiler versions on i486.
#
ifeq ("${Platform_compiler}", "sparcWorks")
# _lwp_create_interpose must have a frame
OPT_CFLAGS/os_solaris_x86.o = -xO1
else
ifeq ("${Platform_compiler}", "gcc")
# gcc
# _lwp_create_interpose must have a frame
OPT_CFLAGS/os_solaris_x86.o = -fno-omit-frame-pointer
#
else
# error
_JUNK2_ := $(shell echo >&2 \
"*** ERROR: this compiler is not yet supported by this code base!")
@exit 1
endif
endif
ifeq ("${Platform_compiler}", "sparcWorks")
# ILD is gone as of SS11 (5.8), not supported in SS10 (5.7)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \< 507), 1)

View File

@ -80,15 +80,12 @@ launcher.c:
} > $@
$(LAUNCHER): $(LAUNCHER.o) $(LIBJVM) $(LAUNCHER_MAPFILE)
ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
@echo Linking launcher...
$(QUIETLY) $(LINK_LAUNCHER/PRE_HOOK)
$(QUIETLY) \
case "$(CFLAGS_BROWSE)" in \
-sbfast|-xsbfast) \
;; \
*) \
echo Linking launcher...; \
$(LINK_LAUNCHER/PRE_HOOK) \
$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER); \
$(LINK_LAUNCHER/POST_HOOK) \
[ -f $(LAUNCHER_G) ] || { ln -s $@ $(LAUNCHER_G); }; \
;; \
esac
$(LINK_LAUNCHER) $(LFLAGS_LAUNCHER) -o $@ $(LAUNCHER.o) $(LIBS_LAUNCHER)
$(QUIETLY) $(LINK_LAUNCHER/POST_HOOK)
[ -f $(LAUNCHER_G) ] || ln -s $@ $(LAUNCHER_G)
endif # filter -sbfast -xsbfast

View File

@ -32,13 +32,10 @@ OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
ifeq ("${Platform_compiler}", "sparcWorks")
# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
ifeq ($(COMPILER_REV_NUMERIC),509)
# Not clear this workaround could be skipped in some cases.
OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g
OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) -g
endif
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
# dtrace cannot handle tail call optimization (6672627, 6693876)
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT)
endif # COMPILER_NUMERIC_REV >= 509
# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
ifeq ($(COMPILER_REV_NUMERIC),508)

View File

@ -40,13 +40,10 @@ endif
# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
ifeq ("${Platform_compiler}", "sparcWorks")
# Problem with SS12 compiler, dtrace doesn't like the .o files (bug 6693876)
ifeq ($(COMPILER_REV_NUMERIC),509)
# Not clear this workaround could be skipped in some cases.
OPT_CFLAGS/vmGCOperations.o = $(OPT_CFLAGS/SLOWER) -g
OPT_CFLAGS/java.o = $(OPT_CFLAGS/SLOWER) -g
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/SLOWER) -g
endif
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
# dtrace cannot handle tail call optimization (6672627, 6693876)
OPT_CFLAGS/jni.o = $(OPT_CFLAGS/DEFAULT) $(OPT_CCFLAGS/NO_TAIL_CALL_OPT)
endif # COMPILER_NUMERIC_REV >= 509
# Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
ifeq ($(COMPILER_REV_NUMERIC),508)

View File

@ -48,27 +48,33 @@ $(shell $(CC) -V 2>&1 | sed -n 's/^.*[ ,\t]C[ ,\t]\([1-9]\.[0-9][0-9]*\).*/\1/p'
# Pick which compiler is validated
ifeq ($(JRE_RELEASE_VER),1.6.0)
# Validated compiler for JDK6 is SS11 (5.8)
VALIDATED_COMPILER_REV := 5.8
VALIDATED_C_COMPILER_REV := 5.8
VALIDATED_COMPILER_REVS := 5.8
VALIDATED_C_COMPILER_REVS := 5.8
else
# Validated compiler for JDK7 is SS12 (5.9)
VALIDATED_COMPILER_REV := 5.9
VALIDATED_C_COMPILER_REV := 5.9
# Validated compilers for JDK7 are SS12 (5.9) or SS12 update 1 (5.10)
VALIDATED_COMPILER_REVS := 5.9 5.10
VALIDATED_C_COMPILER_REVS := 5.9 5.10
endif
# Warning messages about not using the above validated version
ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := ${VALIDATED_COMPILER_REV}
ifneq (${COMPILER_REV},${ENFORCE_COMPILER_REV})
dummy_target_to_enforce_compiler_rev:=\
$(shell echo >&2 WARNING: You are using CC version ${COMPILER_REV} \
and should be using version ${ENFORCE_COMPILER_REV}. Set ENFORCE_COMPILER_REV=${COMPILER_REV} to avoid this warning.)
# Warning messages about not using the above validated versions
ENFORCE_COMPILER_REV${ENFORCE_COMPILER_REV} := $(strip ${VALIDATED_COMPILER_REVS})
ifeq ($(filter ${ENFORCE_COMPILER_REV},${COMPILER_REV}),)
PRINTABLE_CC_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_COMPILER_REV})
dummy_var_to_enforce_compiler_rev := $(shell \
echo >&2 WARNING: You are using CC version ${COMPILER_REV} and \
should be using version ${PRINTABLE_CC_REVS}.; \
echo >&2 Set ENFORCE_COMPILER_REV=${COMPILER_REV} to avoid this \
warning.)
endif
ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := ${VALIDATED_C_COMPILER_REV}
ifneq (${C_COMPILER_REV},${ENFORCE_C_COMPILER_REV})
dummy_target_to_enforce_c_compiler_rev:=\
$(shell echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} \
and should be using version ${ENFORCE_C_COMPILER_REV}. Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this warning.)
ENFORCE_C_COMPILER_REV${ENFORCE_C_COMPILER_REV} := $(strip ${VALIDATED_C_COMPILER_REVS})
ifeq ($(filter ${ENFORCE_C_COMPILER_REV},${C_COMPILER_REV}),)
PRINTABLE_C_REVS := $(subst $(shell echo ' '), or ,${ENFORCE_C_COMPILER_REV})
dummy_var_to_enforce_c_compiler_rev := $(shell \
echo >&2 WARNING: You are using cc version ${C_COMPILER_REV} and \
should be using version ${PRINTABLE_C_REVS}.; \
echo >&2 Set ENFORCE_C_COMPILER_REV=${C_COMPILER_REV} to avoid this \
warning.)
endif
COMPILER_REV_NUMERIC := $(shell echo $(COMPILER_REV) | awk -F. '{ print $$1 * 100 + $$2 }')
@ -139,6 +145,13 @@ OPT_CFLAGS/SLOWER=-xO3
OPT_CFLAGS/O2=-xO2
OPT_CFLAGS/NOOPT=-xO1
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
ifeq ($(Platform_arch), x86)
OPT_CFLAGS/NO_TAIL_CALL_OPT = -Wu,-O~yz
OPT_CCFLAGS/NO_TAIL_CALL_OPT = -Qoption ube -O~yz
endif # Platform_arch == x86
endif # COMPILER_REV_NUMERIC >= 509
#################################################
# Begin current (>=5.6) Forte compiler options #
#################################################
@ -181,10 +194,7 @@ endif # sparc
ifeq ("${Platform_arch_model}", "x86_32")
OPT_CFLAGS=-xtarget=pentium $(EXTRA_OPT_CFLAGS)
# UBE (CC 5.5) has bug 4923569 with -xO4
OPT_CFLAGS+=-xO3
OPT_CFLAGS=-xtarget=pentium -xO4 $(EXTRA_OPT_CFLAGS)
endif # 32bit x86
@ -461,7 +471,7 @@ FASTDEBUG_CFLAGS = -g0
# The -g0 setting allows the C++ frontend to inline, which is a big win.
# Special global options for SS12
ifeq ($(COMPILER_REV_NUMERIC),509)
ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
# There appears to be multiple issues with the new Dwarf2 debug format, so
# we tell the compiler to use the older 'stabs' debug format all the time.
# Note that this needs to be used in optimized compiles too to be 100%.

View File

@ -174,19 +174,16 @@ LINK_VM = $(LINK_LIB.CC)
endif
# making the library:
$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
$(QUIETLY) \
case "$(CFLAGS_BROWSE)" in \
-sbfast|-xsbfast) \
;; \
*) \
echo Linking vm...; \
$(LINK_LIB.CC/PRE_HOOK) \
$(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM); \
$(LINK_LIB.CC/POST_HOOK) \
rm -f $@.1; ln -s $@ $@.1; \
[ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
;; \
esac
ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
@echo Linking vm...
$(QUIETLY) $(LINK_LIB.CC/PRE_HOOK)
$(QUIETLY) $(LINK_VM) $(LFLAGS_VM) -o $@ $(LIBJVM.o) $(LIBS_VM)
$(QUIETLY) $(LINK_LIB.CC/POST_HOOK)
$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1
endif # filter -sbfast -xsbfast
DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)

View File

@ -2233,7 +2233,7 @@ public:
AddressLiteral constant_oop_address(jobject obj); // find_index
inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
inline void set_oop (AddressLiteral& obj_addr, Register d); // same as load_address
inline void set_oop (const AddressLiteral& obj_addr, Register d); // same as load_address
void set_narrow_oop( jobject obj, Register d );

View File

@ -712,7 +712,7 @@ inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
}
inline void MacroAssembler::set_oop(AddressLiteral& obj_addr, Register d) {
inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
set(obj_addr, d);
}

View File

@ -2788,7 +2788,7 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
}
// attach to the region
addr = (char*)shmat(shmid, NULL, 0);
addr = (char*)shmat(shmid, req_addr, 0);
int err = errno;
// Remove shmid. If shmat() is successful, the actual shared memory segment

View File

@ -47,40 +47,56 @@ inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest);
// For Sun Studio - implementation is in solaris_x86_[32/64].il.
// For gcc - implementation is just below.
extern "C" jint _Atomic_add(jint add_value, volatile jint* dest, int mp);
extern "C" jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
extern "C" jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, int mp);
extern "C" jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
// The lock prefix can be omitted for certain instructions on uniprocessors; to
// facilitate this, os::is_MP() is passed as an additional argument. 64-bit
// processors are assumed to be multi-threaded and/or multi-core, so the extra
// argument is unnecessary.
#ifndef _LP64
#define IS_MP_DECL() , int is_mp
#define IS_MP_ARG() , (int) os::is_MP()
#else
#define IS_MP_DECL()
#define IS_MP_ARG()
#endif // _LP64
extern "C" {
jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
jint compare_value IS_MP_DECL());
jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
jlong compare_value IS_MP_DECL());
}
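A minimal standalone sketch of this declare/pass pattern (the stub body and names here are illustrative, not the real HotSpot stubs, which live in the .il files):

#include <cstdio>

#ifndef _LP64
#define IS_MP_DECL() , int is_mp
#define IS_MP_ARG()  , 1                 // stand-in for (int) os::is_MP()
#else
#define IS_MP_DECL()
#define IS_MP_ARG()
#endif

// On 32-bit builds this gains a trailing is_mp parameter; on 64-bit it does not.
static int atomic_add_sketch(int add_value, volatile int* dest IS_MP_DECL()) {
  // A real stub would emit the lock prefix only when is_mp is nonzero.
  int result = *dest + add_value;
  *dest = result;
  return result;
}

int main() {
  volatile int v = 41;
  printf("%d\n", atomic_add_sketch(1, &v IS_MP_ARG()));  // prints 42
  return 0;
}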
inline jint Atomic::add (jint add_value, volatile jint* dest) {
return _Atomic_add(add_value, dest, (int) os::is_MP());
return _Atomic_add(add_value, dest IS_MP_ARG());
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_xchg(exchange_value, dest);
}
inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) {
return _Atomic_cmpxchg(exchange_value, dest, compare_value, (int) os::is_MP());
return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) {
return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, (int) os::is_MP());
return _Atomic_cmpxchg_long(exchange_value, dest, compare_value IS_MP_ARG());
}
#ifdef AMD64
inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest, int mp);
extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest, (int) os::is_MP());
return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
}
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest, (int) os::is_MP());
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
return _Atomic_xchg(exchange_value, dest);
return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
@ -92,11 +108,11 @@ inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* des
}
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP());
return (intptr_t)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, (int) os::is_MP());
return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
inline jlong Atomic::load(volatile jlong* src) { return *src; }
@ -111,13 +127,6 @@ inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
return (void*)add((jint)add_value, (volatile jint*)dest);
}
inline jint Atomic::xchg (jint exchange_value, volatile jint* dest) {
// We noticed a CC5.5 bug (4894807), so keep calling the stub just to be safe.
// Will use the inline template version after 4894807 is fixed.
// return _Atomic_xchg(exchange_value, dest);
return (*os::atomic_xchg_func)(exchange_value, dest);
}
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
}
@ -179,9 +188,6 @@ extern "C" {
#endif // AMD64
inline jint _Atomic_xchg(jint exchange_value, volatile jint* dest) {
// 32bit version originally did nothing!!
__asm__ __volatile__ ("xchgl (%2),%0"
: "=r" (exchange_value)
: "0" (exchange_value), "r" (dest)

View File

@ -50,10 +50,12 @@
movl 4(%esp), %edx // dest
movl %eax, %ecx
cmpl $0, 8(%esp) // MP test
je 1f
lock
1: xaddl %eax, (%edx)
addl %ecx, %eax
jne 1f
xaddl %eax, (%edx)
jmp 2f
1: lock
xaddl %eax, (%edx)
2: addl %ecx, %eax
.end
// Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
@ -72,9 +74,12 @@
movl 0(%esp), %ecx // exchange_value
movl 4(%esp), %edx // dest
cmp $0, 12(%esp) // MP test
je 1f
lock
1: cmpxchgl %ecx, (%edx)
jne 1f
cmpxchgl %ecx, (%edx)
jmp 2f
1: lock
cmpxchgl %ecx, (%edx)
2:
.end
// Support for jlong Atomic::cmpxchg(jlong exchange_value,
@ -90,10 +95,12 @@
movl 8(%esp), %ebx // exchange_value (low)
movl 12(%esp), %ecx // exchange_high (high)
cmp $0, 28(%esp) // MP test
je 1f
lock
1: cmpxchg8b (%edi)
popl %edi
jne 1f
cmpxchg8b (%edi)
jmp 2f
1: lock
cmpxchg8b (%edi)
2: popl %edi
popl %ebx
.end

View File

@ -37,24 +37,18 @@
.end
// Support for jint Atomic::add(jint add_value, volatile jint* dest)
// An additional bool (os::is_MP()) is passed as the last argument.
.inline _Atomic_add,3
.inline _Atomic_add,2
movl %edi, %eax // save add_value for return
testl %edx, %edx // MP test
je 1f
lock
1: xaddl %edi, (%rsi)
xaddl %edi, (%rsi)
addl %edi, %eax
.end
// Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
// An additional bool (os::is_MP()) is passed as the last argument.
.inline _Atomic_add_long,3
.inline _Atomic_add_long,2
movq %rdi, %rax // save add_value for return
testq %rdx, %rdx // MP test
je 1f
lock
1: xaddq %rdi, (%rsi)
xaddq %rdi, (%rsi)
addq %rdi, %rax
.end
@ -73,25 +67,19 @@
// Support for jint Atomic::cmpxchg(jint exchange_value,
// volatile jint *dest,
// jint compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
.inline _Atomic_cmpxchg,4
.inline _Atomic_cmpxchg,3
movl %edx, %eax // compare_value
testl %ecx, %ecx // MP test
je 1f
lock
1: cmpxchgl %edi, (%rsi)
cmpxchgl %edi, (%rsi)
.end
// Support for jlong Atomic::cmpxchg(jlong exchange_value,
// volatile jlong* dest,
// jlong compare_value)
// An additional bool (os::is_MP()) is passed as the last argument.
.inline _Atomic_cmpxchg_long,6
.inline _Atomic_cmpxchg_long,3
movq %rdx, %rax // compare_value
testq %rcx, %rcx // MP test
je 1f
lock
1: cmpxchgq %rdi, (%rsi)
cmpxchgq %rdi, (%rsi)
.end
// Support for OrderAccess::acquire()

View File

@ -735,7 +735,7 @@ int InstructForm::memory_operand(FormDict &globals) const {
// This instruction captures the machine-independent bottom_type
// Expected use is for pointer vs oop determination for LoadP
bool InstructForm::captures_bottom_type() const {
bool InstructForm::captures_bottom_type(FormDict &globals) const {
if( _matrule && _matrule->_rChild &&
(!strcmp(_matrule->_rChild->_opType,"CastPP") || // new result type
!strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
@ -748,6 +748,8 @@ bool InstructForm::captures_bottom_type() const {
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;
if (needs_base_oop_edge(globals)) return true;
return false;
}
@ -1061,7 +1063,7 @@ const char *InstructForm::reduce_left(FormDict &globals) const {
// Base class for this instruction, MachNode except for calls
const char *InstructForm::mach_base_class() const {
const char *InstructForm::mach_base_class(FormDict &globals) const {
if( is_ideal_call() == Form::JAVA_STATIC ) {
return "MachCallStaticJavaNode";
}
@ -1092,7 +1094,7 @@ const char *InstructForm::mach_base_class() const {
else if (is_ideal_nop()) {
return "MachNopNode";
}
else if (captures_bottom_type()) {
else if (captures_bottom_type(globals)) {
return "MachTypeNode";
} else {
return "MachNode";

View File

@ -188,7 +188,7 @@ public:
// This instruction captures the machine-independent bottom_type
// Expected use is for pointer vs oop determination for LoadP
virtual bool captures_bottom_type() const;
virtual bool captures_bottom_type(FormDict& globals) const;
virtual const char *cost(); // Access ins_cost attribute
virtual uint num_opnds(); // Count of num_opnds for MachNode class
@ -229,7 +229,7 @@ public:
const char *reduce_left(FormDict &globals) const;
// Base class for this instruction, MachNode except for calls
virtual const char *mach_base_class() const;
virtual const char *mach_base_class(FormDict &globals) const;
// Check if this instruction can cisc-spill to 'alternate'
bool cisc_spills_to(ArchDesc &AD, InstructForm *alternate);
@ -252,7 +252,7 @@ public:
bool has_short_branch_form() { return _short_branch_form != NULL; }
// Output short branch prototypes and method bodies
void declare_short_branch_methods(FILE *fp_cpp);
bool define_short_branch_methods(FILE *fp_cpp);
bool define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);
uint alignment() { return _alignment; }
void set_alignment(uint val) { _alignment = val; }

View File

@ -1382,7 +1382,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
inst_num, unmatched_edge);
}
// If new instruction captures bottom type
if( root_form->captures_bottom_type() ) {
if( root_form->captures_bottom_type(globals) ) {
// Get bottom type from instruction whose result we are replacing
fprintf(fp, " root->_bottom_type = inst%d->bottom_type();\n", inst_num);
}
@ -2963,7 +2963,7 @@ void ArchDesc::defineClasses(FILE *fp) {
used |= instr->define_cisc_version(*this, fp);
// Output code to convert to the short branch version, if applicable
used |= instr->define_short_branch_methods(fp);
used |= instr->define_short_branch_methods(*this, fp);
}
// Construct the method called by cisc_version() to copy inputs and operands.
@ -3708,7 +3708,7 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
}
// Fill in the bottom_type where requested
if ( inst->captures_bottom_type() ) {
if ( inst->captures_bottom_type(_globalNames) ) {
fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
}
if( inst->is_ideal_if() ) {
@ -3762,7 +3762,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
// Create the MachNode object
fprintf(fp_cpp, " %sNode *node = new (C) %sNode();\n", name, name);
// Fill in the bottom_type where requested
if ( this->captures_bottom_type() ) {
if ( this->captures_bottom_type(AD.globalNames()) ) {
fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
}
@ -3798,7 +3798,7 @@ void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
//---------------------------define_short_branch_methods-----------------------
// Build definitions for short branch methods
bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
if (has_short_branch_form()) {
InstructForm *short_branch = short_branch_form();
const char *name = short_branch->_ident;
@ -3813,7 +3813,7 @@ bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
fprintf(fp_cpp, " node->_fcnt = _fcnt;\n");
}
// Fill in the bottom_type where requested
if ( this->captures_bottom_type() ) {
if ( this->captures_bottom_type(AD.globalNames()) ) {
fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
}

View File

@ -1493,7 +1493,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// Build class definition for this instruction
fprintf(fp,"\n");
fprintf(fp,"class %sNode : public %s { \n",
instr->_ident, instr->mach_base_class() );
instr->_ident, instr->mach_base_class(_globalNames) );
fprintf(fp,"private:\n");
fprintf(fp," MachOper *_opnd_array[%d];\n", instr->num_opnds() );
if ( instr->is_ideal_jump() ) {
@ -1566,7 +1566,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// Use MachNode::ideal_Opcode() for nodes based on MachNode class
// if the ideal_Opcode == Op_Node.
if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
strcmp("MachNode", instr->mach_base_class()) != 0 ) {
strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
fprintf(fp," virtual int ideal_Opcode() const { return Op_%s; }\n",
instr->ideal_Opcode(_globalNames) );
}
@ -1631,7 +1631,7 @@ void ArchDesc::declareClasses(FILE *fp) {
// Use MachNode::oper_input_base() for nodes based on MachNode class
// if the base == 1.
if ( instr->oper_input_base(_globalNames) != 1 ||
strcmp("MachNode", instr->mach_base_class()) != 0 ) {
strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
fprintf(fp," virtual uint oper_input_base() const { return %d; }\n",
instr->oper_input_base(_globalNames));
}
@ -1906,11 +1906,6 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
offset, offset+1, offset+1);
}
else if( instr->needs_base_oop_edge(_globalNames) ) {
// Special hack for ideal AddP. Bottom type is an oop IFF it has a
// legal base-pointer input. Otherwise it is NOT an oop.
fprintf(fp," const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
}
else if (instr->is_tls_instruction()) {
// Special hack for tlsLoadP
fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");

View File

@ -2978,7 +2978,11 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {
bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
if (!InlineNatives ) INLINE_BAILOUT("intrinsic method inlining disabled");
if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
if (callee->is_synchronized()) {
// We don't currently support any synchronized intrinsics
return false;
}
// callee seems like a good candidate
// determine id
bool preserves_state = false;

View File

@ -124,6 +124,23 @@ nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
return (nmethod*)cb;
}
nmethod* CodeCache::first_nmethod() {
assert_locked_or_safepoint(CodeCache_lock);
CodeBlob* cb = first();
while (cb != NULL && !cb->is_nmethod()) {
cb = next(cb);
}
return (nmethod*)cb;
}
nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock);
cb = next(cb);
while (cb != NULL && !cb->is_nmethod()) {
cb = next(cb);
}
return (nmethod*)cb;
}
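A minimal usage sketch for the two new helpers (assuming CodeCache_lock is held or we are at a safepoint, as the asserts require):

// Walk every nmethod, skipping non-nmethod code blobs.
for (nmethod* nm = CodeCache::first_nmethod();
     nm != NULL;
     nm = CodeCache::next_nmethod(nm)) {
  // ... inspect or process nm ...
}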
CodeBlob* CodeCache::allocate(int size) {
// Do not seize the CodeCache lock here--if the caller has not
@ -414,7 +431,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
saved->set_speculatively_disconnected(false);
saved->set_saved_nmethod_link(NULL);
if (PrintMethodFlushing) {
saved->print_on(tty, " ### nmethod is reconnected");
saved->print_on(tty, " ### nmethod is reconnected\n");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
}
void CodeCache::remove_saved_code(nmethod* nm) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
// For conc swpr this will be called with CodeCache_lock taken by caller
assert_locked_or_safepoint(CodeCache_lock);
assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
nmethod* saved = _saved_nmethods;
nmethod* prev = NULL;
@ -463,7 +481,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
nm->set_saved_nmethod_link(_saved_nmethods);
_saved_nmethods = nm;
if (PrintMethodFlushing) {
nm->print_on(tty, " ### nmethod is speculatively disconnected");
nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;

View File

@ -102,6 +102,8 @@ class CodeCache : AllStatic {
static CodeBlob* next (CodeBlob* cb);
static CodeBlob* alive(CodeBlob *cb);
static nmethod* alive_nmethod(CodeBlob *cb);
static nmethod* first_nmethod();
static nmethod* next_nmethod (CodeBlob* cb);
static int nof_blobs() { return _number_of_blobs; }
// GC support

View File

@ -1014,9 +1014,7 @@ void nmethod::clear_inline_caches() {
void nmethod::cleanup_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint() &&
!CompiledIC_lock->is_locked() &&
!Patching_lock->is_locked(), "no threads must be updating the inline caches by themselves");
assert_locked_or_safepoint(CompiledIC_lock);
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
@ -1071,7 +1069,6 @@ void nmethod::mark_as_seen_on_stack() {
// Tell if a non-entrant method can be converted to a zombie (i.e., there are no activations on the stack)
bool nmethod::can_not_entrant_be_converted() {
assert(is_not_entrant(), "must be a non-entrant method");
assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");
// Since the nmethod sweeper only does partial sweep the sweeper's traversal
// count can be greater than the stack traversal count before it hits the
@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
_method = NULL; // Clear the method of this dead nmethod
}
// Make the class unloaded - i.e., change state and notify sweeper
check_safepoint();
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
if (is_in_use()) {
// Transitioning directly from live to unloaded -- so
// we need to force a cache clean-up; remember this
@ -1220,17 +1217,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
}
// When the nmethod becomes zombie it is no longer alive so the
// dependencies must be flushed. nmethods in the not_entrant
// state will be flushed later when the transition to zombie
// happens or they get unloaded.
if (state == zombie) {
assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
flush_dependencies(NULL);
} else {
assert(state == not_entrant, "other cases may need to be handled differently");
}
was_alive = is_in_use(); // Read state under lock
// Change state
@ -1241,6 +1227,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
} // leave critical region under Patching_lock
// When the nmethod becomes zombie it is no longer alive so the
// dependencies must be flushed. nmethods in the not_entrant
// state will be flushed later when the transition to zombie
// happens or they get unloaded.
if (state == zombie) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
flush_dependencies(NULL);
} else {
assert(state == not_entrant, "other cases may need to be handled differently");
}
if (state == not_entrant) {
Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
} else {
@ -1310,21 +1307,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
return true;
}
#ifndef PRODUCT
void nmethod::check_safepoint() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
}
#endif
void nmethod::flush() {
// Note that there are no valid oops in the nmethod anymore.
assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");
assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
check_safepoint();
assert_locked_or_safepoint(CodeCache_lock);
// completely deallocate this method
EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
@ -1373,7 +1362,7 @@ void nmethod::flush() {
// notifies instanceKlasses that are reachable
void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
assert_locked_or_safepoint(CodeCache_lock);
assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
"is_alive is non-NULL if and only if we are called during GC");
if (!has_flushed_dependencies()) {
@ -2266,7 +2255,6 @@ void nmethod::print() const {
tty->print(" for method " INTPTR_FORMAT , (address)method());
tty->print(" { ");
if (version()) tty->print("v%d ", version());
if (level()) tty->print("l%d ", level());
if (is_in_use()) tty->print("in_use ");
if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie ");

View File

@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
struct nmFlags {
friend class VMStructs;
unsigned int version:8; // version number (0 = first version)
unsigned int level:4; // optimization level
unsigned int age:4; // age (in # of sweep steps)
unsigned int state:2; // {alive, zombie, unloaded}
@ -410,14 +409,13 @@ class nmethod : public CodeBlob {
void flush_dependencies(BoolObjectClosure* is_alive);
bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
void set_has_flushed_dependencies() {
check_safepoint();
assert(!has_flushed_dependencies(), "should only happen once");
flags.hasFlushedDependencies = 1;
}
bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
void mark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 1; }
void unmark_for_reclamation() { check_safepoint(); flags.markedForReclamation = 0; }
void mark_for_reclamation() { flags.markedForReclamation = 1; }
void unmark_for_reclamation() { flags.markedForReclamation = 0; }
bool has_unsafe_access() const { return flags.has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
@ -428,9 +426,6 @@ class nmethod : public CodeBlob {
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
int level() const { return flags.level; }
void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
int comp_level() const { return _comp_level; }
int version() const { return flags.version; }

View File

@ -461,12 +461,25 @@ void CompileQueue::add(CompileTask* task) {
//
// Get the next CompileTask from a CompileQueue
CompileTask* CompileQueue::get() {
NMethodSweeper::possibly_sweep();
MutexLocker locker(lock());
// Wait for an available CompileTask.
while (_first == NULL) {
// There is no work to be done right now. Wait.
lock()->wait();
if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
// During the emergency sweeping periods, wake up and sweep occasionally
bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
if (timedout) {
MutexUnlocker ul(lock());
// When otherwise not busy, run nmethod sweeping
NMethodSweeper::possibly_sweep();
}
} else {
// During normal operation no need to wake up on timer
lock()->wait();
}
}
CompileTask* task = _first;
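A standalone analogue of the timed-wait-then-sweep loop above, using C++11 primitives in place of HotSpot's Monitor (illustrative only; the names are made up):

#include <mutex>
#include <condition_variable>
#include <chrono>

std::mutex queue_mutex;
std::condition_variable queue_cv;
bool have_task = false;

void wait_for_task(bool emergency, int check_interval_secs) {
  std::unique_lock<std::mutex> lk(queue_mutex);
  while (!have_task) {
    if (emergency) {
      // During emergency periods, wake on a timer so sweeping can progress.
      if (queue_cv.wait_for(lk, std::chrono::seconds(check_interval_secs))
          == std::cv_status::timeout) {
        lk.unlock();
        // NMethodSweeper::possibly_sweep() would run here, outside the lock.
        lk.lock();
      }
    } else {
      queue_cv.wait(lk);  // normal operation: no timed wakeup needed
    }
  }
}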

View File

@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

View File

@ -1,5 +1,5 @@
/*
* Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,30 +32,75 @@ class PromotedObject VALUE_OBJ_CLASS_SPEC {
displaced_mark = nth_bit(2), // i.e. 0x4
next_mask = ~(right_n_bits(3)) // i.e. ~(0x7)
};
intptr_t _next;
// Below, we want _narrow_next in the "higher" 32 bit slot,
// whose position will depend on endian-ness of the platform.
// This is so that there is no interference with the
// cms_free_bit occupying bit position 7 (lsb == 0)
// when we are using compressed oops; see FreeChunk::isFree().
// We cannot move the cms_free_bit down because currently
// biased locking code assumes that age bits are contiguous
// with the lock bits. Even if that assumption were relaxed,
// the least position we could move this bit to would be
// to bit position 3, which would require 16 byte alignment.
typedef struct {
#ifdef VM_LITTLE_ENDIAN
LP64_ONLY(narrowOop _pad;)
narrowOop _narrow_next;
#else
narrowOop _narrow_next;
LP64_ONLY(narrowOop _pad;)
#endif
} Data;
union {
intptr_t _next;
Data _data;
};
public:
inline PromotedObject* next() const {
return (PromotedObject*)(_next & next_mask);
assert(!((FreeChunk*)this)->isFree(), "Error");
PromotedObject* res;
if (UseCompressedOops) {
// The next pointer is a compressed oop stored in the top 32 bits
res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
} else {
res = (PromotedObject*)(_next & next_mask);
}
assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
return res;
}
inline void setNext(PromotedObject* x) {
assert(((intptr_t)x & ~next_mask) == 0,
"Conflict in bit usage, "
" or insufficient alignment of objects");
_next |= (intptr_t)x;
assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
"or insufficient alignment of objects");
if (UseCompressedOops) {
assert(_data._narrow_next == 0, "Overwrite?");
_data._narrow_next = oopDesc::encode_heap_oop(oop(x));
} else {
_next |= (intptr_t)x;
}
assert(!((FreeChunk*)this)->isFree(), "Error");
}
inline void setPromotedMark() {
_next |= promoted_mask;
assert(!((FreeChunk*)this)->isFree(), "Error");
}
inline bool hasPromotedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
return (_next & promoted_mask) == promoted_mask;
}
inline void setDisplacedMark() {
_next |= displaced_mark;
assert(!((FreeChunk*)this)->isFree(), "Error");
}
inline bool hasDisplacedMark() const {
assert(!((FreeChunk*)this)->isFree(), "Error");
return (_next & displaced_mark) != 0;
}
inline void clearNext() { _next = 0; }
inline void clearNext() {
_next = 0;
assert(!((FreeChunk*)this)->isFree(), "Error");
}
debug_only(void *next_addr() { return (void *) &_next; })
};
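A standalone model of the union trick described in the comment above (hypothetical little-endian 64-bit layout; field names mirror the change):

#include <cstdint>
#include <cstdio>

// The compressed next pointer lives in the upper 32 bits so that bit 7 of
// the full word (the CMS free bit) is never touched when it is stored.
union PromotedHeader {
  intptr_t _next;              // full-width view; low bits hold the flag bits
  struct {
    uint32_t _pad;             // low half on little-endian
    uint32_t _narrow_next;     // high half: compressed next pointer
  } _data;
};

int main() {
  PromotedHeader h;
  h._next = 0;
  h._data._narrow_next = 0x12345678;   // store a compressed "next" value
  // The low byte, which contains bit 7, is still clear:
  printf("low byte = 0x%02x\n", (unsigned)(h._next & 0xff));  // prints 0x00
  return 0;
}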

View File

@ -766,10 +766,12 @@ void ConcurrentMark::checkpointRootsInitialPre() {
_has_aborted = false;
#ifndef PRODUCT
if (G1PrintReachableAtInitialMark) {
print_reachable("at-cycle-start",
true /* use_prev_marking */, true /* all */);
}
#endif
// Initialise marking structures. This has to be done in a STW phase.
reset();

View File

@ -471,21 +471,23 @@ HeapRegion* G1CollectedHeap::newAllocRegion_work(size_t word_size,
res->zero_fill_state() == HeapRegion::Allocated)),
"Alloc Regions must be zero filled (and non-H)");
}
if (res != NULL && res->is_empty()) _free_regions--;
assert(res == NULL ||
(!res->isHumongous() &&
(!zero_filled ||
res->zero_fill_state() == HeapRegion::Allocated)),
"Non-young alloc Regions must be zero filled (and non-H)");
if (G1PrintHeapRegions) {
if (res != NULL) {
if (res != NULL) {
if (res->is_empty()) {
_free_regions--;
}
assert(!res->isHumongous() &&
(!zero_filled || res->zero_fill_state() == HeapRegion::Allocated),
err_msg("Non-young alloc Regions must be zero filled (and non-H):"
" res->isHumongous()=%d, zero_filled=%d, res->zero_fill_state()=%d",
res->isHumongous(), zero_filled, res->zero_fill_state()));
assert(!res->is_on_unclean_list(),
"Alloc Regions must not be on the unclean list");
if (G1PrintHeapRegions) {
gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
"top "PTR_FORMAT,
res->hrs_index(), res->bottom(), res->end(), res->top());
}
}
return res;
}
@ -2338,10 +2340,12 @@ void G1CollectedHeap::verify(bool allow_dirty,
gclog_or_tty->print_cr("Heap:");
print_on(gclog_or_tty, true /* extended */);
gclog_or_tty->print_cr("");
#ifndef PRODUCT
if (VerifyDuringGC && G1VerifyDuringGCPrintReachable) {
concurrent_mark()->print_reachable("at-verification-failure",
use_prev_marking, false /* all */);
}
#endif
gclog_or_tty->flush();
}
guarantee(!failures, "there should not have been any failures");
@ -4600,6 +4604,15 @@ void G1CollectedHeap::wait_for_cleanup_complete_locked() {
void
G1CollectedHeap::put_region_on_unclean_list_locked(HeapRegion* r) {
assert(ZF_mon->owned_by_self(), "precondition.");
#ifdef ASSERT
if (r->is_gc_alloc_region()) {
ResourceMark rm;
stringStream region_str;
print_on(&region_str);
assert(!r->is_gc_alloc_region(), err_msg("Unexpected GC allocation region: %s",
region_str.as_string()));
}
#endif
_unclean_region_list.insert_before_head(r);
}

View File

@ -554,11 +554,19 @@ HeapWord* HeapRegion::allocate(size_t size) {
#endif
void HeapRegion::set_zero_fill_state_work(ZeroFillState zfs) {
assert(top() == bottom() || zfs == Allocated,
"Region must be empty, or we must be setting it to allocated.");
assert(ZF_mon->owned_by_self() ||
Universe::heap()->is_gc_active(),
"Must hold the lock or be a full GC to modify.");
#ifdef ASSERT
if (top() != bottom() && zfs != Allocated) {
ResourceMark rm;
stringStream region_str;
print_on(&region_str);
assert(top() == bottom() || zfs == Allocated,
err_msg("Region must be empty, or we must be setting it to allocated. "
"_zfs=%d, zfs=%d, region: %s", _zfs, zfs, region_str.as_string()));
}
#endif
_zfs = zfs;
}

View File

@ -109,7 +109,7 @@ class SpaceMangler: public CHeapObj {
// is fully constructed. Also is used when a generation is expanded
// and possibly before the spaces have been reshaped to the new
// size of the generation.
static void mangle_region(MemRegion mr);
static void mangle_region(MemRegion mr) PRODUCT_RETURN;
};
class ContiguousSpace;

View File

@ -32,10 +32,12 @@ HS_DTRACE_PROBE_DECL(hotspot, gc__end);
// for the other file anymore. The dtrace probes have to remain stable.
void VM_GC_Operation::notify_gc_begin(bool full) {
HS_DTRACE_PROBE1(hotspot, gc__begin, full);
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void VM_GC_Operation::notify_gc_end() {
HS_DTRACE_PROBE(hotspot, gc__end);
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void VM_GC_Operation::acquire_pending_list_lock() {

View File

@ -65,7 +65,7 @@ CollectedHeap::CollectedHeap()
void CollectedHeap::pre_initialize() {
// Used for ReduceInitialCardMarks (when COMPILER2 is used);
// otherwise remains unused.
#ifdef COMPLER2
#ifdef COMPILER2
_defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
&& (DeferInitialCardMark || card_mark_must_follow_store());
#else
@ -309,7 +309,7 @@ void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
DEBUG_ONLY(fill_args_check(start, words);)
HandleMark hm; // Free handles before leaving.
#ifdef LP64
#ifdef _LP64
// A single array can fill ~8G, so multiple objects are needed only in 64-bit.
// First fill with arrays, ensuring that any remaining space is big enough to
// fill. The remainder is filled with a single object.

View File

@ -27,12 +27,26 @@
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
// Bit-format of an object header (most significant first):
// Bit-format of an object header (most significant first, big endian layout below):
//
// 32 bits: unused:0 hash:25 age:4 biased_lock:1 lock:2
// 64 bits: unused:24 hash:31 cms:2 age:4 biased_lock:1 lock:2
// unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2 (if cms
// free chunk)
// 32 bits:
// --------
// hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
// size:32 ------------------------------------------>| (CMS free block)
// PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
// 64 bits:
// --------
// unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object)
// JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object)
// PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
// size:64 ----------------------------------------------------->| (CMS free block)
//
// unused:25 hash:31 -->| cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && normal object)
// JavaThread*:54 epoch:2 cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && biased object)
// narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
// unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
// - hash contains the identity hash value: largest value is
// 31 bits, see os::random(). Also, 64-bit vm's require
@ -61,8 +75,9 @@
// significant fraction of the eden semispaces and were not
// promoted promptly, causing an increase in the amount of copying
// performed. The runtime system aligns all JavaThread* pointers to
// a very large value (currently 128 bytes) to make room for the
// age bits when biased locking is enabled.
// a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
// to make room for the age bits & the epoch bits (used in support of
// biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
// [JavaThread* | epoch | age | 1 | 01] lock is biased toward given thread
// [0 | epoch | age | 1 | 01] lock is anonymously biased
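Per the layouts documented above, the low bits of a mark word decode as lock:2 (bits 0-1), biased_lock:1 (bit 2), and age:4 (bits 3-6); a small standalone check with a hypothetical value:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t mark = 0x29;   // hypothetical mark word: age = 5, unlocked (lock = 01)
  unsigned lock   =  mark       & 0x3;   // bits 0-1
  unsigned biased = (mark >> 2) & 0x1;   // bit 2
  unsigned age    = (mark >> 3) & 0xf;   // bits 3-6
  printf("lock=%u biased=%u age=%u\n", lock, biased, age);  // lock=1 biased=0 age=5
  return 0;
}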

View File

@ -714,71 +714,6 @@ uint AddPNode::match_edge(uint idx) const {
return idx > Base;
}
//---------------------------mach_bottom_type----------------------------------
// Utility function for use by ADLC. Implements bottom_type for matched AddP.
const Type *AddPNode::mach_bottom_type( const MachNode* n) {
Node* base = n->in(Base);
const Type *t = base->bottom_type();
if ( t == Type::TOP ) {
// an untyped pointer
return TypeRawPtr::BOTTOM;
}
const TypePtr* tp = t->isa_oopptr();
if ( tp == NULL ) return t;
if ( tp->_offset == TypePtr::OffsetBot ) return tp;
// We must carefully add up the various offsets...
intptr_t offset = 0;
const TypePtr* tptr = NULL;
uint numopnds = n->num_opnds();
uint index = n->oper_input_base();
for ( uint i = 1; i < numopnds; i++ ) {
MachOper *opnd = n->_opnds[i];
// Check for any interesting operand info.
// In particular, check for both memory and non-memory operands.
// %%%%% Clean this up: use xadd_offset
intptr_t con = opnd->constant();
if ( con == TypePtr::OffsetBot ) goto bottom_out;
offset += con;
con = opnd->constant_disp();
if ( con == TypePtr::OffsetBot ) goto bottom_out;
offset += con;
if( opnd->scale() != 0 ) goto bottom_out;
// Check each operand input edge. Find the 1 allowed pointer
// edge. Other edges must be index edges; track exact constant
// inputs and otherwise assume the worst.
for ( uint j = opnd->num_edges(); j > 0; j-- ) {
Node* edge = n->in(index++);
const Type* et = edge->bottom_type();
const TypeX* eti = et->isa_intptr_t();
if ( eti == NULL ) {
// there must be one pointer among the operands
guarantee(tptr == NULL, "must be only one pointer operand");
if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
// 32-bits narrow oop can be the base of address expressions
tptr = et->make_ptr()->isa_oopptr();
} else {
// only regular oops are expected here
tptr = et->isa_oopptr();
}
guarantee(tptr != NULL, "non-int operand must be pointer");
if (tptr->higher_equal(tp->add_offset(tptr->offset())))
tp = tptr; // Set more precise type for bailout
continue;
}
if ( eti->_hi != eti->_lo ) goto bottom_out;
offset += eti->_lo;
}
}
guarantee(tptr != NULL, "must be exactly one pointer operand");
return tptr->add_offset(offset);
bottom_out:
return tp->add_offset(TypePtr::OffsetBot);
}
//=============================================================================
//------------------------------Identity---------------------------------------
Node *OrINode::Identity( PhaseTransform *phase ) {

View File

@ -151,7 +151,6 @@ public:
// Do not match base-ptr edge
virtual uint match_edge(uint idx) const;
static const Type *mach_bottom_type(const MachNode* n); // used by ad_<arch>.hpp
};
//------------------------------OrINode----------------------------------------

View File

@ -1654,6 +1654,64 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (opt != NULL) return opt;
}
if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
// Try to undo Phi of AddP:
// (Phi (AddP base base y) (AddP base2 base2 y))
// becomes:
// newbase := (Phi base base2)
// (AddP newbase newbase y)
//
// This occurs as a result of unsuccessful split_thru_phi and
// interferes with taking advantage of addressing modes. See the
// clone_shift_expressions code in matcher.cpp
Node* addp = in(1);
const Type* type = addp->in(AddPNode::Base)->bottom_type();
Node* y = addp->in(AddPNode::Offset);
if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
// make sure that all the inputs are similar to the first one,
// i.e. AddP with base == address and same offset as first AddP
bool doit = true;
for (uint i = 2; i < req(); i++) {
if (in(i) == NULL ||
in(i)->Opcode() != Op_AddP ||
in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
in(i)->in(AddPNode::Offset) != y) {
doit = false;
break;
}
// Accumulate type for resulting Phi
type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
}
Node* base = NULL;
if (doit) {
// Check for neighboring AddP nodes in a tree.
// If they have a base, use it.
for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
Node* u = this->fast_out(k);
if (u->is_AddP()) {
Node* base2 = u->in(AddPNode::Base);
if (base2 != NULL && !base2->is_top()) {
if (base == NULL)
base = base2;
else if (base != base2)
{ doit = false; break; }
}
}
}
}
if (doit) {
if (base == NULL) {
base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
for (uint i = 1; i < req(); i++) {
base->init_req(i, in(i)->in(AddPNode::Base));
}
phase->is_IterGVN()->register_new_node_with_optimizer(base);
}
return new (phase->C, 4) AddPNode(base, base, y);
}
}
}
// Split phis through memory merges, so that the memory merges will go away.
// Piggy-back this transformation on the search for a unique input....
// It will be as if the merged memory is the unique value of the phi.

View File

@ -1989,20 +1989,15 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
case Op_Allocate:
{
Node *k = call->in(AllocateNode::KlassNode);
const TypeKlassPtr *kt;
if (k->Opcode() == Op_LoadKlass) {
kt = k->as_Load()->type()->isa_klassptr();
} else {
// Also works for DecodeN(LoadNKlass).
kt = k->as_Type()->type()->isa_klassptr();
}
const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
assert(kt != NULL, "TypeKlassPtr required.");
ciKlass* cik = kt->klass();
ciInstanceKlass* ciik = cik->as_instance_klass();
PointsToNode::EscapeState es;
uint edge_to;
if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
!cik->is_instance_klass() || // StressReflectiveCode
cik->as_instance_klass()->has_finalizer()) {
es = PointsToNode::GlobalEscape;
edge_to = _phantom_object; // Could not be worse
} else {
@ -2017,13 +2012,28 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
case Op_AllocateArray:
{
int length = call->in(AllocateNode::ALength)->find_int_con(-1);
if (length < 0 || length > EliminateAllocationArraySizeLimit) {
// Not scalar replaceable if the length is not constant or too big.
ptnode_adr(call_idx)->_scalar_replaceable = false;
Node *k = call->in(AllocateNode::KlassNode);
const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
assert(kt != NULL, "TypeKlassPtr required.");
ciKlass* cik = kt->klass();
PointsToNode::EscapeState es;
uint edge_to;
if (!cik->is_array_klass()) { // StressReflectiveCode
es = PointsToNode::GlobalEscape;
edge_to = _phantom_object;
} else {
es = PointsToNode::NoEscape;
edge_to = call_idx;
int length = call->in(AllocateNode::ALength)->find_int_con(-1);
if (length < 0 || length > EliminateAllocationArraySizeLimit) {
// Not scalar replaceable if the length is not constant or too big.
ptnode_adr(call_idx)->_scalar_replaceable = false;
}
}
set_escape_state(call_idx, PointsToNode::NoEscape);
add_pointsto_edge(resproj_idx, call_idx);
set_escape_state(call_idx, es);
add_pointsto_edge(resproj_idx, edge_to);
_processed.set(resproj_idx);
break;
}

View File

@ -2764,6 +2764,9 @@ class CommandLineFlags {
product(intx, NmethodSweepFraction, 4, \
"Number of invocations of sweeper to cover all nmethods") \
\
product(intx, NmethodSweepCheckInterval, 5, \
"Compilers wake up every n seconds to possibly sweep nmethods") \
\
notproduct(intx, MemProfilingInterval, 500, \
"Time between each invocation of the MemProfiler") \
\
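The new NmethodSweepCheckInterval above is an ordinary product flag, so it can be set on the command line like any other; for example, to have compiler threads check every 10 seconds instead of the default 5:

java -XX:NmethodSweepCheckInterval=10 ...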

View File

@ -470,6 +470,7 @@ void vm_exit(int code) {
void notify_vm_shutdown() {
// For now, just a dtrace probe.
HS_DTRACE_PROBE(hotspot, vm__shutdown);
HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
}
void vm_direct_exit(int code) {

View File

@ -1,5 +1,5 @@
/*
* Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,8 +63,14 @@ class JNIHandles : AllStatic {
// refers to NULL (as is the case for any weak reference).
static jmethodID make_jmethod_id(methodHandle mh);
static void destroy_jmethod_id(jmethodID mid);
// Use resolve_jmethod_id() in situations where the caller is expected
// to provide a valid jmethodID; the only sanity checks are in asserts;
// result guaranteed not to be NULL.
inline static methodOop resolve_jmethod_id(jmethodID mid);
inline static methodOop checked_resolve_jmethod_id(jmethodID mid); // NULL on invalid jmethodID
// Use checked_resolve_jmethod_id() in situations where the caller
// should provide a valid jmethodID, but might not. NULL is returned
// when the jmethodID does not refer to a valid method.
inline static methodOop checked_resolve_jmethod_id(jmethodID mid);
static void change_method_associated_with_jmethod_id(jmethodID jmid, methodHandle mh);
// Sentinel marking deleted handles in block. Note that we cannot store NULL as
@ -200,12 +206,8 @@ inline methodOop JNIHandles::resolve_jmethod_id(jmethodID mid) {
};
inline methodOop JNIHandles::checked_resolve_jmethod_id(jmethodID mid) {
if (mid == NULL) {
return (methodOop) NULL;
}
oop o = resolve_non_null((jobject) mid);
if (!o->is_method()) {
oop o = resolve_external_guard((jobject) mid);
if (o == NULL || !o->is_method()) {
return (methodOop) NULL;
}

View File

@ -472,7 +472,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
}
TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
NMethodSweeper::sweep();
NMethodSweeper::scan_stacks();
}

View File

@ -33,6 +33,8 @@ int NMethodSweeper::_invocations = 0; // No. of invocations left until we
jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_rescan = false;
bool NMethodSweeper::_do_sweep = false;
jint NMethodSweeper::_sweep_started = 0;
bool NMethodSweeper::_was_full = false;
jint NMethodSweeper::_advise_to_sweep = 0;
jlong NMethodSweeper::_last_was_full = 0;
@ -50,14 +52,20 @@ public:
};
static MarkActivationClosure mark_activation_closure;
void NMethodSweeper::sweep() {
void NMethodSweeper::scan_stacks() {
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
if (!MethodFlushing) return;
_do_sweep = true;
// No need to synchronize access, since this is always executed at a
// safepoint. If we aren't in the middle of a scan and a rescan
// hasn't been requested then just return.
if (_current == NULL && !_rescan) return;
// hasn't been requested then just return. If UseCodeCacheFlushing is on and
// code cache flushing is in progress, don't skip sweeping to help make progress
// clearing space in the code cache.
if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
_do_sweep = false;
return;
}
// Make sure CompiledIC_lock is unlocked, since we might update some
// inline caches. If it is, we just bail-out and try later.
@ -68,7 +76,7 @@ void NMethodSweeper::sweep() {
if (_current == NULL) {
_seen = 0;
_invocations = NmethodSweepFraction;
_current = CodeCache::first();
_current = CodeCache::first_nmethod();
_traversals += 1;
if (PrintMethodFlushing) {
tty->print_cr("### Sweep: stack traversal %d", _traversals);
@ -81,48 +89,9 @@ void NMethodSweeper::sweep() {
_not_entrant_seen_on_stack = 0;
}
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
}
// We want to visit all nmethods after NmethodSweepFraction invocations.
// If invocation is 1 we do the rest
int todo = CodeCache::nof_blobs();
if (_invocations != 1) {
todo = (CodeCache::nof_blobs() - _seen) / _invocations;
_invocations--;
}
for(int i = 0; i < todo && _current != NULL; i++) {
CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
if (_current->is_nmethod()) {
process_nmethod((nmethod *)_current);
}
_seen++;
_current = next;
}
// Because we could stop on a codeBlob other than an nmethod we skip forward
// to the next nmethod (if any). codeBlobs other than nmethods can be freed
// async to us and make _current invalid while we sleep.
while (_current != NULL && !_current->is_nmethod()) {
_current = CodeCache::next(_current);
}
if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
// we've completed a scan without making progress but there were
// nmethods we were unable to process either because they were
// locked or were still on stack. We don't have to aggressively
// clean them up so just stop scanning. We could scan once more
// but that complicates the control logic and it's unlikely to
// matter much.
if (PrintMethodFlushing) {
tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
}
}
if (UseCodeCacheFlushing) {
if (!CodeCache::needs_flushing()) {
// In a safepoint, no race with setters
// scan_stacks() runs during a safepoint, no race with setters
_advise_to_sweep = 0;
}
@ -155,13 +124,99 @@ void NMethodSweeper::sweep() {
}
}
void NMethodSweeper::possibly_sweep() {
if (!MethodFlushing || !_do_sweep) return;
if (_invocations > 0) {
// Only one thread at a time will sweep
jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
if (old != 0) {
return;
}
sweep_code_cache();
}
_sweep_started = 0;
}
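The cmpxchg on _sweep_started is a lock-free try-lock: only the thread that flips the flag from 0 to 1 proceeds to sweep, and every other caller returns immediately instead of blocking. The same idiom in isolation, as a minimal sketch:
// Minimal sketch of the try-lock idiom used above.
static volatile jint busy = 0;                      // illustrative flag
void do_work_at_most_once_at_a_time() {
  if (Atomic::cmpxchg(1, &busy, 0) != 0) return;    // lost the race; bail out
  // ... exclusive work, performed without holding a mutex ...
  busy = 0;                                          // release for the next round
}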
void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
jlong sweep_start;
if(PrintMethodFlushing) {
sweep_start = os::javaTimeMillis();
}
#endif
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
}
// We want to visit all nmethods after NmethodSweepFraction invocations.
// If _invocations is 1 we do the rest
int todo = CodeCache::nof_blobs();
if (_invocations > 1) {
todo = (CodeCache::nof_blobs() - _seen) / _invocations;
}
// Compiler threads may check in to sweep more often than stack scans happen;
// don't keep trying once the whole code cache has been scanned.
_invocations--;
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
for(int i = 0; i < todo && _current != NULL; i++) {
// Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
// Other blobs can be deleted by other threads
// Read next before we potentially delete current
CodeBlob* next = CodeCache::next_nmethod(_current);
// Now ready to process nmethod and give up CodeCache_lock
{
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
process_nmethod((nmethod *)_current);
}
_seen++;
_current = next;
}
// Skip forward to the next nmethod (if any). Code blobs other than nmethods
// can be freed async to us and make _current invalid while we sleep.
_current = CodeCache::next_nmethod(_current);
}
if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
// we've completed a scan without making progress but there were
// nmethods we were unable to process either because they were
// locked or were still on stack. We don't have to aggressively
// clean them up so just stop scanning. We could scan once more
// but that complicates the control logic and it's unlikely to
// matter much.
if (PrintMethodFlushing) {
tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
}
}
#ifdef ASSERT
if(PrintMethodFlushing) {
jlong sweep_end = os::javaTimeMillis();
tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
}
#endif
}
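The fraction logic visits all nmethods over NmethodSweepFraction calls, sweeping roughly (remaining / _invocations) blobs per call and finishing whatever is left on the last one. A worked example with assumed numbers:
// Worked example (assumed numbers): NmethodSweepFraction = 4, 4000 blobs,
// and nof_blobs() constant between calls.
//   call 1: _invocations = 4, todo = (4000 -    0) / 4 = 1000, _seen -> 1000
//   call 2: _invocations = 3, todo = (4000 - 1000) / 3 = 1000, _seen -> 2000
//   call 3: _invocations = 2, todo = (4000 - 2000) / 2 = 1000, _seen -> 3000
//   call 4: _invocations = 1, todo = nof_blobs()             (do the rest)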
void NMethodSweeper::process_nmethod(nmethod *nm) {
assert(!CodeCache_lock->owned_by_self(), "just checking");
// Skip methods that are currently referenced by the VM
if (nm->is_locked_by_vm()) {
// But still remember to clean-up inline caches for alive nmethods
if (nm->is_alive()) {
// Clean up all inline caches that point to zombie/non-reentrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
} else {
_locked_seen++;
@ -178,6 +233,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
}
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm->flush();
} else {
if (PrintMethodFlushing && Verbose) {
@ -197,10 +253,11 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
_rescan = true;
} else {
// Still alive, clean up its inline caches
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
// we couldn't transition this nmethod so don't immediately
// request a rescan. If this method stays on the stack for a
// long time we don't want to keep rescanning at every safepoint.
// long time we don't want to keep rescanning the code cache.
_not_entrant_seen_on_stack++;
}
} else if (nm->is_unloaded()) {
@ -209,6 +266,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
if (nm->is_osr_method()) {
// No inline caches will ever point to osr methods, so we can just remove it
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nm->flush();
} else {
nm->make_zombie();
@ -227,6 +285,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
}
// Clean up all inline caches that point to zombie/non-reentrant methods
MutexLocker cl(CompiledIC_lock);
nm->cleanup_inline_caches();
}
}
@ -235,8 +294,8 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then
// execution resumes. If a method so marked is not called by the second
// safepoint from the current one, the nmethod will be marked non-entrant and
// execution resumes. If a method so marked is not called by the second sweeper
// stack traversal after the current one, the nmethod will be marked non-entrant and
// removed by normal sweeping. If the method is called, the methodOop's
// _code field is restored and the methodOop/nmethod
// go back to their normal state.
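A hedged sketch of the disconnect/restore protocol described above; the bookkeeping helpers are hypothetical, only the shape of the mechanism matters:
// Hypothetical sketch of the speculative disconnect protocol:
//   disconnect: detach the compiled entry from the methodOop but keep the
//   nmethod parked on a saved-code list instead of freeing it.
//     nm->method()->clear_code();         // next call re-enters the interpreter
//     CodeCache::save_disconnected(nm);   // hypothetical bookkeeping
//   restore: if the method is called again soon, reconnect and resume.
//     methodOopDesc::set_code(m, saved_nm);  // hypothetical restore path
//   otherwise the sweeper marks the parked nmethod non-entrant and reclaims it.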
@ -364,8 +423,8 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
xtty->end_elem();
}
// Shut off compiler. Sweeper will run exiting from this safepoint
// and turn it back on if it clears enough space
// Shut off compiler. Sweeper will start over with a new stack scan and
// traversal cycle and turn it back on if it clears enough space.
if (was_full()) {
_last_was_full = os::javaTimeMillis();
CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);

View File

@ -35,6 +35,8 @@ class NMethodSweeper : public AllStatic {
static bool _rescan; // Indicates that we should do a full rescan
// of the code cache looking for work to do.
static bool _do_sweep; // Flag to skip the concurrent sweep if no stack scan happened
static jint _sweep_started; // Flag to control the concurrent sweeper
static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
@ -48,7 +50,9 @@ class NMethodSweeper : public AllStatic {
public:
static long traversal_count() { return _traversals; }
static void sweep(); // Invoked at the end of each safepoint
static void scan_stacks(); // Invoked at the end of each safepoint
static void sweep_code_cache(); // Concurrent part of sweep job
static void possibly_sweep(); // Compiler threads call this to sweep
static void notify(nmethod* nm) {
// Perform a full scan of the code cache from the beginning. No

View File

@ -101,8 +101,8 @@ GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
bool found_first_monitor = false;
ObjectMonitor *pending_monitor = thread()->current_pending_monitor();
ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : NULL);
oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : NULL);
oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : (oop) NULL);
oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : (oop) NULL);
for (int index = (mons->length()-1); index >= 0; index--) {
MonitorInfo* monitor = mons->at(index);
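The added (oop) casts presumably keep both arms of each conditional expression the same type, which matters for stricter compilers and for builds where oop is a class rather than a bare pointer. A reduced illustration, assuming a checked-oop build:
// Reduced illustration (assumes a class-type oop, as in CHECK_UNHANDLED_OOPS
// builds; not the actual HotSpot definitions):
//   cond ? (oop) mon->object() : NULL          // arms: class type vs. plain 0
//   cond ? (oop) mon->object() : (oop) NULL    // both arms are oop; unambiguous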

View File

@ -190,6 +190,8 @@ const char* Abstract_VM_Version::internal_vm_info_string() {
#define HOTSPOT_BUILD_COMPILER "Workshop 5.8"
#elif __SUNPRO_CC == 0x590
#define HOTSPOT_BUILD_COMPILER "Workshop 5.9"
#elif __SUNPRO_CC == 0x5100
#define HOTSPOT_BUILD_COMPILER "Sun Studio 12u1"
#else
#define HOTSPOT_BUILD_COMPILER "unknown Workshop:" XSTR(__SUNPRO_CC)
#endif
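__SUNPRO_CC packs the compiler version into hex digits: 0x580 is CC 5.8, 0x590 is CC 5.9, and 0x5100 is CC 5.10, which shipped as Sun Studio 12u1. Each new release therefore needs its own case; a hypothetical follow-on entry would keep the same shape:
// Hypothetical follow-on entry (version value and product name assumed):
// #elif __SUNPRO_CC == 0x5110
// #define HOTSPOT_BUILD_COMPILER "Sun Studio 12u2"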

View File

@ -29,6 +29,10 @@
#define DTRACE_ONLY(x) x
#define NOT_DTRACE(x)
// Work around dtrace tail call bug 6672627 until it is fixed in solaris 10.
#define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() \
do { volatile size_t dtrace_workaround_tail_call_bug = 1; } while (0)
#else // ndef SOLARIS || ndef DTRACE_ENABLED
#define DTRACE_ONLY(x)
@ -41,6 +45,8 @@
#define DTRACE_PROBE4(a,b,c,d,e,f) {;}
#define DTRACE_PROBE5(a,b,c,d,e,f,g) {;}
#define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
#endif
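The workaround macro plants a volatile local so the optimizer must materialize a stack frame, defeating the tail-call optimization that confuses dtrace (bug 6672627). A hedged usage sketch; the entry point and helper are illustrative:
// Illustrative usage: keep a frame so the probed caller stays visible to dtrace.
jint some_jni_entry_sketch(JNIEnv* env) {
  jint result = do_real_work(env);          // hypothetical helper
  DTRACE_PROBE1(hotspot_jni, SomeEntry__return, result);
  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();     // volatile local defeats the tail call
  return result;
}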
#define HS_DTRACE_PROBE_FN(provider,name)\