Yumin Qi 2015-01-10 12:38:18 -08:00
commit a8ee05a956
569 changed files with 11445 additions and 4407 deletions

View File

@ -285,3 +285,5 @@ b409bc51bc23cfd51f2bd04ea919ec83535af9d0 jdk9-b37
82f4cb44b2d7af2352f48568a64b7b6a5ae960cd jdk9-b40
9fffb959eb4197ff806e4ac12244761815b4deee jdk9-b41
3107be2ba9c6e208a0b86bc7100a141abbc5b5fb jdk9-b42
6494b13f88a867026ee316b444d9a4fa589dd6bd jdk9-b43
abbfccd659b91a7bb815d5e36fed635dcdd40f31 jdk9-b44

View File

@ -285,3 +285,5 @@ d42c0a90afc3c66ca87543076ec9aafd4b4680de jdk9-b38
cf136458ee747e151a27aa9ea0c1492ea55ef3e7 jdk9-b40
67395f7ca2db3b52e3a62a84888487de5cb9210a jdk9-b41
f7c11da0b0481d49cc7a65a453336c108191e821 jdk9-b42
02ee8c65622e8bd97496d584e22fc7dcf0edc4ae jdk9-b43
8994f5d87b3bb5e8d317d4e8ccb326da1a73684a jdk9-b44

View File

@ -4329,7 +4329,7 @@ TOOLCHAIN_DESCRIPTION_xlc="IBM XL C/C++"
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
DATE_WHEN_GENERATED=1418036274
DATE_WHEN_GENERATED=1418395009
###############################################################################
#
@ -13965,7 +13965,8 @@ $as_echo "$COMPILE_TYPE" >&6; }
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
ppc) ZERO_ARCHDEF=PPC32 ;;
ppc64) ZERO_ARCHDEF=PPC64 ;;
s390*) ZERO_ARCHDEF=S390 ;;
sparc*) ZERO_ARCHDEF=SPARC ;;
x86_64*) ZERO_ARCHDEF=AMD64 ;;

View File

@ -367,7 +367,8 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS],
# ZERO_ARCHDEF is used to enable architecture-specific code
case "${OPENJDK_TARGET_CPU}" in
ppc*) ZERO_ARCHDEF=PPC ;;
ppc) ZERO_ARCHDEF=PPC32 ;;
ppc64) ZERO_ARCHDEF=PPC64 ;;
s390*) ZERO_ARCHDEF=S390 ;;
sparc*) ZERO_ARCHDEF=SPARC ;;
x86_64*) ZERO_ARCHDEF=AMD64 ;;

View File

@ -106,12 +106,15 @@ if [ ${vflag} = "true" ] ; then
echo "# Mercurial command: ${command}" > ${status_output}
fi
# capture command options and arguments (if any)
command_args="${@:-}"
# At this point all command options and args are in "$@".
# Always use "$@" (within double quotes) to avoid breaking
# args with spaces into separate args.
if [ ${vflag} = "true" ] ; then
echo "# Mercurial command arguments: ${command_args}" > ${status_output}
echo "# Mercurial command argument count: $#" > ${status_output}
for cmdarg in "$@" ; do
echo "# Mercurial command argument: ${cmdarg}" > ${status_output}
done
fi
# Clean out the temporary directory that stores the pid files.
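Note: as the new comment says, an argument list has to travel as a vector; flattening it into a string and re-splitting is what broke spaced arguments. A minimal C sketch of the same pitfall (hypothetical file name), for contrast with the "$@" fix above:

#include <unistd.h>

int main(void) {
    /* Vector form: "my file.txt" survives as a single argument. */
    char *argv[] = { "/bin/ls", "my file.txt", (char *)0 };
    execv("/bin/ls", argv);
    /* By contrast, system("ls my file.txt") hands the shell one flat
     * string, which it word-splits into two arguments, exactly what
     * the old ${command_args} interpolation did. */
    return 1;
}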
@ -205,13 +208,14 @@ if [ "${command}" = "clone" -o "${command}" = "fclone" -o "${command}" = "tclone
pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
if [ -n "${command_args}" ] ; then
if [ $# -gt 0 ] ; then
# if there is an "extra sources" path then reparent "extra" repos to that path
if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then
echo "ERROR: Need initial clone from non-local source" > ${status_output}
exit 1
fi
pull_extra="${command_args}/${pull_default_tail}"
# assume that "extra sources" path is the first arg
pull_extra="${1}/${pull_default_tail}"
# determine which extra subrepos need to be cloned.
for i in ${subrepos_extra} ; do
@ -356,8 +360,8 @@ else
(PYTHONUNBUFFERED=true hg${global_opts} clone ${clone_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
else
# run the command.
echo "cd ${i} && hg${global_opts} ${command} ${command_args}" > ${status_output}
cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} ${command_args}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
echo "cd ${i} && hg${global_opts} ${command} ${@}" > ${status_output}
cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} "${@}"; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
fi
echo $! > ${tmp}/${repopidfile}.pid

View File

@ -285,3 +285,5 @@ ffd90c81d4ef9d94d880fc852e2fc482ecd9b374 jdk9-b36
e27c725d6c9d155667b35255f442d4ceb8c3c084 jdk9-b40
1908b886ba1eda46fa725cf1160fe5d30fd1a7e5 jdk9-b41
078bb11af876fe528d4b516f33ad4dd9bb60549e jdk9-b42
9645e35616b60c5c07b4fdf11a132afc8081dfa8 jdk9-b43
1f57bd728c9e6865ccb9d43ccd80a1c11230a32f jdk9-b44

View File

@ -445,3 +445,5 @@ c363a8b87e477ee45d6d3cb2a36cb365141bc596 jdk9-b38
6b09b3193d731e3288e2a240c504a20d0a06c766 jdk9-b40
1d29b13e8a515a7ea3b882f140576d5d675bc11f jdk9-b41
38cb4fbd47e3472bd1b5ebac83bda96fe4869c4f jdk9-b42
65a9747147b8090037541040ba67156ec914db6a jdk9-b43
43a44b56dca61a4d766a20f0528fdd8b5ceff873 jdk9-b44

View File

@ -22,6 +22,9 @@
*
*/
// Disable CRT security warning against strcpy/strcat
#pragma warning(disable: 4996)
// This is the source code of the windbg-based SA debugger agent used to
// debug Dr. Watson dump files and process snapshots.

View File

@ -33,6 +33,8 @@
*/
#ifdef _WINDOWS
// Disable CRT security warning against _snprintf
#pragma warning (disable : 4996)
#define snprintf _snprintf
#define vsnprintf _vsnprintf
@ -90,12 +92,8 @@ static int getLastErrorString(char *buf, size_t len)
if (errno != 0)
{
/* C runtime error that has no corresponding DOS error code */
const char *s = strerror(errno);
size_t n = strlen(s);
if (n >= len) n = len - 1;
strncpy(buf, s, n);
buf[n] = '\0';
return (int)n;
strerror_s(buf, len, errno);
return strlen(buf);
}
return 0;
}
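For reference, a minimal sketch of the strerror_s behavior the new code depends on (MSVC flavor; the C11 Annex K version additionally requires __STDC_WANT_LIB_EXT1__): the message is truncated to fit and always NUL-terminated, which is what the removed strncpy arithmetic did by hand.

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    char buf[8];                       /* deliberately small buffer */
    strerror_s(buf, sizeof(buf), ERANGE);
    /* buf is truncated and NUL-terminated, so strlen(buf) is
     * always < sizeof(buf). */
    printf("%s\n", buf);
    return 0;
}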
@ -111,16 +109,30 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_asm_Disassembler_load_1library(JNIE
jstring jrepath_s,
jstring libname_s) {
uintptr_t func = 0;
const char* error_message = NULL;
jboolean isCopy;
const char * jrepath = (*env)->GetStringUTFChars(env, jrepath_s, &isCopy); // like $JAVA_HOME/jre/lib/sparc/
const char * libname = (*env)->GetStringUTFChars(env, libname_s, &isCopy);
const char *error_message = NULL;
const char *jrepath = NULL;
const char *libname = NULL;
char buffer[128];
#ifdef _WINDOWS
HINSTANCE hsdis_handle = (HINSTANCE) NULL;
#else
void* hsdis_handle = NULL;
#endif
jrepath = (*env)->GetStringUTFChars(env, jrepath_s, NULL); // like $JAVA_HOME/jre/lib/sparc/
if (jrepath == NULL || (*env)->ExceptionOccurred(env)) {
return 0;
}
libname = (*env)->GetStringUTFChars(env, libname_s, NULL);
if (libname == NULL || (*env)->ExceptionOccurred(env)) {
(*env)->ReleaseStringUTFChars(env, jrepath_s, jrepath);
return 0;
}
/* Load the hsdis library */
#ifdef _WINDOWS
HINSTANCE hsdis_handle;
hsdis_handle = LoadLibrary(libname);
if (hsdis_handle == NULL) {
snprintf(buffer, sizeof(buffer), "%s%s", jrepath, libname);
@ -134,7 +146,6 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_asm_Disassembler_load_1library(JNIE
error_message = buffer;
}
#else
void* hsdis_handle;
hsdis_handle = dlopen(libname, RTLD_LAZY | RTLD_GLOBAL);
if (hsdis_handle == NULL) {
snprintf(buffer, sizeof(buffer), "%s%s", jrepath, libname);
@ -156,6 +167,11 @@ JNIEXPORT jlong JNICALL Java_sun_jvm_hotspot_asm_Disassembler_load_1library(JNIE
* platform dependent error message.
*/
jclass eclass = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
if ((*env)->ExceptionOccurred(env)) {
/* Can't throw exception, probably OOM, so silently return 0 */
return (jlong) 0;
}
(*env)->ThrowNew(env, eclass, error_message);
}
return (jlong)func;
@ -184,16 +200,22 @@ typedef struct {
/* event callback binding to Disassembler.handleEvent */
static void* event_to_env(void* env_pv, const char* event, void* arg) {
jlong result = 0;
decode_env* denv = (decode_env*)env_pv;
JNIEnv* env = denv->env;
jstring event_string = (*env)->NewStringUTF(env, event);
jlong result = (*env)->CallLongMethod(env, denv->dis, denv->handle_event, denv->visitor,
event_string, (jlong) (uintptr_t)arg);
if ((*env)->ExceptionOccurred(env) != NULL) {
if ((*env)->ExceptionOccurred(env)) {
return NULL;
}
result = (*env)->CallLongMethod(env, denv->dis, denv->handle_event, denv->visitor,
event_string, (jlong) (uintptr_t)arg);
if ((*env)->ExceptionOccurred(env)) {
/* ignore exceptions for now */
(*env)->ExceptionClear(env);
result = 0;
return NULL;
}
return (void*)(uintptr_t)result;
}
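The reordering above follows the general JNI rule this file is being moved to: after any JNI call that can raise a Java exception, check ExceptionOccurred before using the result or issuing further JNI calls. A minimal sketch of the pattern (hypothetical helper, assumes only <jni.h>):

#include <jni.h>

static jstring make_label(JNIEnv *env, const char *text) {
    jstring s = (*env)->NewStringUTF(env, text);
    if ((*env)->ExceptionOccurred(env)) {
        /* Allocation failed (e.g. OOM); s must not be used and no
         * JNI calls other than exception handling may follow. */
        return NULL;
    }
    return s;
}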
@ -219,10 +241,13 @@ static int printf_to_env(void* env_pv, const char* format, ...) {
}
if (raw != NULL) {
jstring output = (*env)->NewStringUTF(env, raw);
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
if ((*env)->ExceptionOccurred(env) != NULL) {
if (!(*env)->ExceptionOccurred(env)) {
/* make sure that UTF allocation doesn't cause OOM */
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
}
if ((*env)->ExceptionOccurred(env)) {
/* ignore exceptions for now */
(*env)->ExceptionClear(env);
(*env)->ExceptionClear(env);
}
return (int) flen;
}
@ -231,11 +256,16 @@ static int printf_to_env(void* env_pv, const char* format, ...) {
va_end(ap);
output = (*env)->NewStringUTF(env, denv->buffer);
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
if ((*env)->ExceptionOccurred(env) != NULL) {
if (!(*env)->ExceptionOccurred(env)) {
/* make sure that UTF allocation doesn't cause OOM */
(*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
}
if ((*env)->ExceptionOccurred(env)) {
/* ignore exceptions for now */
(*env)->ExceptionClear(env);
}
return cnt;
}
@ -251,13 +281,24 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_asm_Disassembler_decode(JNIEnv * env
jbyteArray code,
jstring options_s,
jlong decode_instructions_virtual) {
jboolean isCopy;
jbyte* start = (*env)->GetByteArrayElements(env, code, &isCopy);
jbyte* end = start + (*env)->GetArrayLength(env, code);
const char * options = (*env)->GetStringUTFChars(env, options_s, &isCopy);
jclass disclass = (*env)->GetObjectClass(env, dis);
jbyte *start = NULL;
jbyte *end = NULL;
jclass disclass = NULL;
const char *options = NULL;
decode_env denv;
start = (*env)->GetByteArrayElements(env, code, NULL);
if ((*env)->ExceptionOccurred(env)) {
return;
}
end = start + (*env)->GetArrayLength(env, code);
options = (*env)->GetStringUTFChars(env, options_s, NULL);
if ((*env)->ExceptionOccurred(env)) {
(*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
return;
}
disclass = (*env)->GetObjectClass(env, dis);
denv.env = env;
denv.dis = dis;
denv.visitor = visitor;
@ -266,6 +307,8 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_asm_Disassembler_decode(JNIEnv * env
denv.handle_event = (*env)->GetMethodID(env, disclass, "handleEvent",
"(Lsun/jvm/hotspot/asm/InstructionVisitor;Ljava/lang/String;J)J");
if ((*env)->ExceptionOccurred(env)) {
(*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
(*env)->ReleaseStringUTFChars(env, options_s, options);
return;
}
@ -273,11 +316,13 @@ JNIEXPORT void JNICALL Java_sun_jvm_hotspot_asm_Disassembler_decode(JNIEnv * env
denv.raw_print = (*env)->GetMethodID(env, disclass, "rawPrint",
"(Lsun/jvm/hotspot/asm/InstructionVisitor;Ljava/lang/String;)V");
if ((*env)->ExceptionOccurred(env)) {
(*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
(*env)->ReleaseStringUTFChars(env, options_s, options);
return;
}
/* decode the buffer */
(*(decode_func)(uintptr_t)decode_instructions_virtual)(startPc,
(*(decode_func)(uintptr_t)decode_instructions_virtual)((uintptr_t) startPc,
startPc + end - start,
(unsigned char*)start,
end - start,

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -70,7 +70,6 @@
_JVM_FillInStackTrace
_JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
_JVM_FindLibraryEntry
_JVM_FindLoadedClass

View File

@ -70,7 +70,6 @@
_JVM_FillInStackTrace
_JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
_JVM_FindLibraryEntry
_JVM_FindLoadedClass

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -72,7 +72,6 @@ SUNWprivate_1.1 {
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
JVM_FindLibraryEntry;
JVM_FindLoadedClass;

View File

@ -544,6 +544,9 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
cmplw(CCR0, Rindex, Rlength);
sldi(RsxtIndex, RsxtIndex, index_shift);
blt(CCR0, LnotOOR);
// Index should be in R17_tos, array should be in R4_ARG2.
mr(R17_tos, Rindex);
mr(R4_ARG2, Rarray);
load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
mtctr(Rtmp);
bctr();
@ -1678,6 +1681,228 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
}
}
// Argument and return type profiling.
// kills: tmp, tmp2, R0, CR0, CR1
void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
Label do_nothing, do_update;
// tmp2 = obj is allowed
assert_different_registers(obj, mdo_addr_base, tmp, R0);
assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
const Register klass = tmp2;
verify_oop(obj);
ld(tmp, mdo_addr_offs, mdo_addr_base);
// Set null_seen if obj is 0.
cmpdi(CCR0, obj, 0);
ori(R0, tmp, TypeEntries::null_seen);
beq(CCR0, do_update);
load_klass(klass, obj);
clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
cmpd(CCR1, R0, klass);
// Klass seen before, nothing to do (regardless of unknown bit).
//beq(CCR1, do_nothing);
andi_(R0, klass, TypeEntries::type_unknown);
// Already unknown. Nothing to do anymore.
//bne(CCR0, do_nothing);
crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
beq(CCR0, do_nothing);
clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
beq(CCR0, do_update); // First time here. Set profile type.
// Different than before. Cannot keep accurate profile.
ori(R0, tmp, TypeEntries::type_unknown);
bind(do_update);
// update profile
std(R0, mdo_addr_offs, mdo_addr_base);
align(32, 12);
bind(do_nothing);
}
void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
if (!ProfileInterpreter) {
return;
}
assert_different_registers(callee, tmp1, tmp2, R28_mdx);
if (MethodData::profile_arguments() || MethodData::profile_return()) {
Label profile_continue;
test_method_data_pointer(profile_continue);
int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
bne(CCR0, profile_continue);
if (MethodData::profile_arguments()) {
Label done;
int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
add(R28_mdx, off_to_args, R28_mdx);
for (int i = 0; i < TypeProfileArgsLimit; i++) {
if (i > 0 || MethodData::profile_return()) {
// If return value type is profiled we may have no argument to profile.
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
blt(CCR0, done);
}
ld(tmp1, in_bytes(Method::const_offset()), callee);
lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
// Stack offset o (zero based) from the start of the argument
// list, for n arguments translates into offset n - o - 1 from
// the end of the argument list. But there's an extra slot at
// the top of the stack. So the offset is n - o from Lesp.
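// Example: with n = 3 parameter slots and o = 0 (the first argument),
// the code below reads the slot (n - o) = 3 stack elements from R15_esp.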
ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
subf(tmp1, tmp2, tmp1);
sldi(tmp1, tmp1, Interpreter::logStackElementSize);
ldx(tmp1, tmp1, R15_esp);
profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
addi(R28_mdx, R28_mdx, to_add);
off_to_args += to_add;
}
if (MethodData::profile_return()) {
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
}
bind(done);
if (MethodData::profile_return()) {
// We're right after the type profile for the last
// argument. tmp1 is the number of cells left in the
// CallTypeData/VirtualCallTypeData to reach its end. Non-null
// if there's a return to profile.
assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
add(R28_mdx, tmp1, R28_mdx);
}
} else {
assert(MethodData::profile_return(), "either profile call args or call ret");
update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
}
// Mdp points right after the end of the
// CallTypeData/VirtualCallTypeData, right after the cells for the
// return value type if there's one.
align(32, 12);
bind(profile_continue);
}
}
void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
assert_different_registers(ret, tmp1, tmp2);
if (ProfileInterpreter && MethodData::profile_return()) {
Label profile_continue;
test_method_data_pointer(profile_continue);
if (MethodData::profile_return_jsr292_only()) {
// If we don't profile all invoke bytecodes we must make sure
// it's a bytecode we indeed profile. We can't go back to the
// beginning of the ProfileData we intend to update to check its
// type because we're right after it and we don't know its
// length.
lbz(tmp1, 0, R14_bcp);
lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
bne(CCR0, profile_continue);
}
profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
align(32, 12);
bind(profile_continue);
}
}
void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
if (ProfileInterpreter && MethodData::profile_parameters()) {
Label profile_continue, done;
test_method_data_pointer(profile_continue);
// Load the offset of the area within the MDO used for
// parameters. If it's negative we're not profiling any parameters.
lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
cmpwi(CCR0, tmp1, 0);
blt(CCR0, profile_continue);
// Compute a pointer to the area for parameters from the offset
// and move the pointer to the slot for the last
// parameters. Collect profiling from last parameter down.
// mdo start + parameters offset + array length - 1
// Pointer to the parameter area in the MDO.
const Register mdp = tmp1;
add(mdp, tmp1, R28_mdx);
// Offset of the current profile entry to update.
const Register entry_offset = tmp2;
// entry_offset = array len in number of cells
ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
// entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
// entry_offset in bytes
sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
Label loop;
align(32, 12);
bind(loop);
// Load offset on the stack from the slot for this parameter.
ld(tmp3, entry_offset, mdp);
sldi(tmp3, tmp3, Interpreter::logStackElementSize);
neg(tmp3, tmp3);
// Read the parameter from the local area.
ldx(tmp3, tmp3, R18_locals);
// Make entry_offset now point to the type field for this parameter.
int type_base = in_bytes(ParametersTypeData::type_offset(0));
assert(type_base > off_base, "unexpected");
addi(entry_offset, entry_offset, type_base - off_base);
// Profile the parameter.
profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
// Go to next parameter.
int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
cmpdi(CCR0, entry_offset, off_base + delta);
addi(entry_offset, entry_offset, -delta);
bge(CCR0, loop);
align(32, 12);
bind(profile_continue);
}
}
// Add an InterpMonitorElem to the stack (see frame_sparc.hpp).
void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
@ -2039,20 +2264,19 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
bne(CCR0, test);
address fd = CAST_FROM_FN_PTR(address, verify_return_address);
unsigned int nbytes_save = 10*8; // 10 volatile gprs
save_LR_CR(Rtmp);
const int nbytes_save = 11*8; // volatile gprs except R0
save_volatile_gprs(R1_SP, -nbytes_save); // except R0
save_LR_CR(Rtmp); // Save in old frame.
push_frame_reg_args(nbytes_save, Rtmp);
save_volatile_gprs(R1_SP, 112); // except R0
load_const_optimized(Rtmp, fd, R0);
mr_if_needed(R4_ARG2, reg);
mr(R3_ARG1, R19_method);
call_c(Rtmp); // call C
restore_volatile_gprs(R1_SP, 112); // except R0
pop_frame();
restore_LR_CR(Rtmp);
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
b(skip);
// Perform a more elaborate out-of-line call.

View File

@ -255,6 +255,12 @@ class InterpreterMacroAssembler: public MacroAssembler {
void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
// Argument and return type profiling.
void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
void profile_return_type(Register ret, Register tmp1, Register tmp2);
void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
#endif // !CC_INTERP
// Debugging

View File

@ -807,6 +807,7 @@ void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
// For verify_oops.
void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
std(R2, offset, dst); offset += 8;
std(R3, offset, dst); offset += 8;
std(R4, offset, dst); offset += 8;
std(R5, offset, dst); offset += 8;
@ -821,6 +822,7 @@ void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
// For verify_oops.
void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
ld(R2, offset, src); offset += 8;
ld(R3, offset, src); offset += 8;
ld(R4, offset, src); offset += 8;
ld(R5, offset, src); offset += 8;
@ -1187,6 +1189,16 @@ void MacroAssembler::call_VM(Register oop_result, address entry_point, Register
call_VM(oop_result, entry_point, check_exceptions);
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
bool check_exceptions) {
// R3_ARG1 is reserved for the thread
mr_if_needed(R4_ARG2, arg_1);
assert(arg_2 != R4_ARG2, "smashed argument");
mr_if_needed(R5_ARG3, arg_2);
mr_if_needed(R6_ARG4, arg_3);
call_VM(oop_result, entry_point, check_exceptions);
}
void MacroAssembler::call_VM_leaf(address entry_point) {
call_VM_leaf_base(entry_point);
}
@ -3059,35 +3071,27 @@ void MacroAssembler::verify_oop(Register oop, const char* msg) {
if (!VerifyOops) {
return;
}
// Will be preserved.
Register tmp = R11;
assert(oop != tmp, "precondition");
unsigned int nbytes_save = 10*8; // 10 volatile gprs
address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
// save tmp
mr(R0, tmp);
// kill tmp
save_LR_CR(tmp);
const Register tmp = R11; // Will be preserved.
const int nbytes_save = 11*8; // Volatile gprs except R0.
save_volatile_gprs(R1_SP, -nbytes_save); // except R0
if (oop == tmp) mr(R4_ARG2, oop);
save_LR_CR(tmp); // save in old frame
push_frame_reg_args(nbytes_save, tmp);
// restore tmp
mr(tmp, R0);
save_volatile_gprs(R1_SP, 112); // except R0
// load FunctionDescriptor** / entry_address *
load_const(tmp, fd);
load_const_optimized(tmp, fd, R0);
// load FunctionDescriptor* / entry_address
ld(tmp, 0, tmp);
mr(R4_ARG2, oop);
load_const(R3_ARG1, (address)msg);
// call destination for its side effect
if (oop != tmp) mr_if_needed(R4_ARG2, oop);
load_const_optimized(R3_ARG1, (address)msg, R0);
// Call destination for its side effect.
call_c(tmp);
restore_volatile_gprs(R1_SP, 112); // except R0
pop_frame();
// save tmp
mr(R0, tmp);
// kill tmp
restore_LR_CR(tmp);
// restore tmp
mr(tmp, R0);
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}
const char* stop_types[] = {

View File

@ -369,6 +369,7 @@ class MacroAssembler: public Assembler {
void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
void call_VM_leaf(address entry_point);
void call_VM_leaf(address entry_point, Register arg_1);
void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);

View File

@ -100,10 +100,7 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
MacroAssembler* a = new MacroAssembler(&cb);
// Patch the call.
if (ReoptimizeCallSequences &&
a->is_within_range_of_b(dest, addr_call)) {
a->bl(dest);
} else {
if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) {
address trampoline_stub_addr = get_trampoline();
// We did not find a trampoline stub because the current codeblob
@ -115,9 +112,12 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
// Patch the constant in the call's trampoline stub.
NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
a->bl(trampoline_stub_addr);
dest = trampoline_stub_addr;
}
OrderAccess::release();
a->bl(dest);
ICache::ppc64_flush_icache_bytes(addr_call, code_size);
}
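The point of the restructuring: the trampoline's destination is written first, OrderAccess::release() orders that store, and only then is the bl rewritten, so a thread that takes the new branch can never read a stale trampoline target. A minimal C11 sketch of this publish pattern (hypothetical names, not the HotSpot API):

#include <stdatomic.h>

static int payload;              /* stands in for the trampoline destination */
static atomic_int published;     /* stands in for the patched branch */

void publish(int value) {
    payload = value;                              /* write the data first */
    atomic_thread_fence(memory_order_release);    /* OrderAccess::release() */
    atomic_store_explicit(&published, 1, memory_order_relaxed);
}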

View File

@ -1936,8 +1936,9 @@ ArchOpcode MachSpillCopyNode_archOpcode(MachSpillCopyNode *n, PhaseRegAlloc *ra_
// --------------------------------------------------------------------
// Check for hi bits still needing moving. Only happens for misaligned
// arguments to native calls.
if (src_hi == dst_hi)
if (src_hi == dst_hi) {
return ppc64Opcode_none; // Self copy; no move.
}
ShouldNotReachHere();
return ppc64Opcode_undefined;
@ -1959,14 +1960,15 @@ void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
}
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
return _count * 4;
return _count * 4;
}
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_reg_first(this);
st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
char reg_str[128];
ra_->dump_register(this, reg_str);
st->print("ADDI %s, SP, %d \t// box node", reg_str, offset);
}
#endif

View File

@ -91,7 +91,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_verbose_handle
// Thread will be loaded to R3_ARG1.
// Target class oop is in register R5_ARG3 by convention!
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
// Above call must not return here since exception pending.
DEBUG_ONLY(__ should_not_reach_here();)
return entry;
@ -172,6 +172,10 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// Compiled code destroys templateTableBase, reload.
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
if (state == atos) {
__ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
}
const Register cache = R11_scratch1;
const Register size = R12_scratch2;
__ get_cache_and_index_at_bcp(cache, 1, index_size);
@ -1189,6 +1193,10 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
__ li(R0, 1);
__ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
}
// Argument and return type profiling.
__ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
// Increment invocation counter and check for overflow.
if (inc_counter) {
generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
@ -1469,6 +1477,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
if (ProfileInterpreter) {
__ set_method_data_pointer_for_bcp();
__ ld(R11_scratch1, 0, R1_SP);
__ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
}
#if INCLUDE_JVMTI
Label L_done;
@ -1480,13 +1490,11 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
// Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
__ ld(R4_ARG2, 0, R18_locals);
__ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
R4_ARG2, R19_method, R14_bcp);
__ cmpdi(CCR0, R11_scratch1, 0);
__ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
__ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
__ cmpdi(CCR0, R4_ARG2, 0);
__ beq(CCR0, L_done);
__ std(R11_scratch1, wordSize, R15_esp);
__ std(R4_ARG2, wordSize, R15_esp);
__ bind(L_done);
#endif // INCLUDE_JVMTI
__ dispatch_next(vtos);

View File

@ -3235,6 +3235,8 @@ void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex,
// Load target.
__ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
__ ldx(Rtarget_method, Rindex, Rrecv_klass);
// Argument and return type profiling.
__ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
__ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}
@ -3318,6 +3320,8 @@ void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Regis
__ null_check_throw(Rrecv, -1, Rscratch1);
__ profile_final_call(Rrecv, Rscratch1);
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
// Do the call.
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
@ -3339,6 +3343,8 @@ void TemplateTable::invokespecial(int byte_no) {
__ null_check_throw(Rreceiver, -1, R11_scratch1);
__ profile_call(R11_scratch1, R12_scratch2);
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
__ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}
@ -3353,6 +3359,8 @@ void TemplateTable::invokestatic(int byte_no) {
prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
__ profile_call(R11_scratch1, R12_scratch2);
// Argument and return type profiling.
__ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
__ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}
@ -3374,6 +3382,8 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
// Final call case.
__ profile_final_call(Rtemp1, Rscratch);
// Argument and return type profiling.
__ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
// Do the final call - the index (f2) contains the method.
__ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
@ -3425,6 +3435,8 @@ void TemplateTable::invokeinterface(int byte_no) {
__ cmpdi(CCR0, Rindex, 0);
__ beq(CCR0, Lthrow_ame);
// Found entry. Jump off!
// Argument and return type profiling.
__ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
__ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
// Vtable entry was NULL => Throw abstract method error.
@ -3468,6 +3480,8 @@ void TemplateTable::invokedynamic(int byte_no) {
// to be the callsite object the bootstrap method returned. This is passed to a
// "link" method which does the dispatch (Most likely just grabs the MH stored
// inside the callsite and does an invokehandle).
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}
@ -3488,6 +3502,8 @@ void TemplateTable::invokehandle(int byte_no) {
__ profile_final_call(Rrecv, Rscratch1);
// Still no call from handle => We call the method handle interpreter here.
// Argument and return type profiling.
__ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
__ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

View File

@ -134,13 +134,44 @@ void VM_Version::initialize() {
}
assert(AllocatePrefetchLines > 0, "invalid value");
if (AllocatePrefetchLines < 1) // Set valid value in product VM.
if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
AllocatePrefetchLines = 1; // Conservative value.
}
if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
AllocatePrefetchStyle = 1; // Fall back if inappropriate.
}
assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle must not be negative");
if (UseCRC32Intrinsics) {
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
warning("CRC32 intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
}
// The AES intrinsic stubs require AES instruction support.
if (UseAES) {
warning("AES instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseAES, false);
}
if (UseAESIntrinsics) {
if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
warning("AES intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
}
if (UseSHA) {
warning("SHA instructions are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
warning("SHA intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
}
void VM_Version::print_features() {

View File

@ -675,7 +675,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
case handle_exception_nofpu_id:
case handle_exception_id:
// At this point all registers MAY be live.
oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
break;
case handle_exception_from_callee_id: {
// At this point all registers except exception oop (RAX) and
@ -748,7 +748,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
case handle_exception_nofpu_id:
case handle_exception_id:
// Restore the registers that were saved at the beginning.
restore_live_registers(sasm, id == handle_exception_nofpu_id);
restore_live_registers(sasm, id != handle_exception_nofpu_id);
break;
case handle_exception_from_callee_id:
// WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -45,7 +45,8 @@ void OSThread::pd_initialize() {
sigemptyset(&_caller_sigmask);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
assert(_startThread_lock !=NULL, "check");
}

View File

@ -124,12 +124,6 @@ extern "C" {
}
#endif
// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000 // Power PC 7
# define PV_7_Compat 0x208000 // Power PC 7
#endif
#define MAX_PATH (2 * K)
// for timer info max values which include all bits
@ -140,17 +134,40 @@ extern "C" {
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
// the semantics in this file are thus that codeptr_t is a *real code ptr*
// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (eg getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (eg os::dll_address_to_library_name)
// descriptors (eg os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
// typedefs for stackslots, stack pointers, pointers to op codes
// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;
// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000 /* Power PC 7 */
#define PV_7_Compat 0x208000 /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000 /* Power PC 8 */
#define PV_8_Compat 0x308000 /* Power PC 8 */
#endif
#define trcVerbose(fmt, ...) { /* PPC port */ \
if (Verbose) { \
fprintf(stderr, fmt, ##__VA_ARGS__); \
fputc('\n', stderr); fflush(stderr); \
} \
}
#define trc(fmt, ...) /* PPC port */
#define ERRBYE(s) { \
trcVerbose(s); \
return -1; \
}
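A standalone illustration of the trcVerbose macro above (Verbose here is a local stand-in for the VM's global flag; ##__VA_ARGS__ is the GCC/MSVC extension that swallows the comma when no varargs are passed):

#include <stdio.h>

static int Verbose = 1;

#define trcVerbose(fmt, ...) { \
  if (Verbose) { \
    fprintf(stderr, fmt, ##__VA_ARGS__); \
    fputc('\n', stderr); fflush(stderr); \
  } \
}

int main(void) {
    trcVerbose("processor count: %d", 4);  /* printed: Verbose is set */
    Verbose = 0;
    trcVerbose("suppressed");              /* guard is false: no output */
    return 0;
}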
// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
@ -182,12 +199,12 @@ inline bool is_valid_codepointer(codeptr_t p) {
return true;
}
// macro to check a given stack pointer against given stack limits and to die if test fails
// Macro to check a given stack pointer against given stack limits and to die if test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}
// macro to check the current stack pointer against given stacklimits
// Macro to check the current stack pointer against given stacklimits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
address sp; \
sp = os::current_stack_pointer(); \
@ -221,7 +238,7 @@ static bool check_signals = true;
static pid_t _initial_pid = 0;
static int SR_signum = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls */
static pthread_mutex_t dl_mutex; // Used to protect dlsym() calls.
julong os::available_memory() {
return Aix::available_memory();
@ -253,7 +270,6 @@ bool os::getenv(const char* name, char* buf, int len) {
return false;
}
// Return true if user is running as root.
bool os::have_special_privileges() {
@ -284,8 +300,7 @@ static bool my_disclaim64(char* addr, size_t size) {
for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
//if (Verbose)
fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
return false;
}
p += maxDisclaimSize;
@ -293,8 +308,7 @@ static bool my_disclaim64(char* addr, size_t size) {
if (lastDisclaimSize > 0) {
if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
//if (Verbose)
fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
return false;
}
}
@ -334,11 +348,11 @@ pid_t os::Aix::gettid() {
void os::Aix::initialize_system_info() {
// get the number of online(logical) cpus instead of configured
// Get the number of online(logical) cpus instead of configured.
os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
assert(_processor_count > 0, "_processor_count must be > 0");
// retrieve total physical storage
// Retrieve total physical storage.
os::Aix::meminfo_t mi;
if (!os::Aix::get_meminfo(&mi)) {
fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
@ -513,7 +527,6 @@ query_multipage_support_end:
} // end os::Aix::query_multipage_support()
// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {
#define DEFAULT_LIBPATH "/usr/lib:/lib"
@ -603,10 +616,11 @@ bool os::Aix::is_sig_ignored(int sig) {
sigaction(sig, (struct sigaction*)NULL, &oact);
void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
: CAST_FROM_FN_PTR(void*, oact.sa_handler);
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
return true;
else
} else {
return false;
}
}
void os::Aix::signal_sets_init() {
@ -780,6 +794,9 @@ bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
// get the processor version from _system_configuration
switch (_system_configuration.version) {
case PV_8:
strcpy(pci->version, "Power PC 8");
break;
case PV_7:
strcpy(pci->version, "Power PC 7");
break;
@ -807,6 +824,9 @@ bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
case PV_7_Compat:
strcpy(pci->version, "PV_7_Compat");
break;
case PV_8_Compat:
strcpy(pci->version, "PV_8_Compat");
break;
default:
strcpy(pci->version, "unknown");
}
@ -942,7 +962,9 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
pthread_attr_destroy(&attr);
if (ret != 0) {
if (ret == 0) {
// PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
} else {
if (PrintMiscellaneous && (Verbose || WizardMode)) {
perror("pthread_create()");
}
@ -1103,8 +1125,7 @@ jlong os::javaTimeNanos() {
if (os::Aix::on_pase()) {
Unimplemented();
return 0;
}
else {
} else {
// On AIX use the precision of processors real time clock
// or time base registers.
timebasestruct_t time;
@ -1152,7 +1173,6 @@ bool os::getTimesSecs(double* process_real_time,
}
}
char * os::local_time_string(char *buf, size_t buflen) {
struct tm t;
time_t long_time;
@ -1190,7 +1210,6 @@ void os::shutdown() {
if (abort_hook != NULL) {
abort_hook();
}
}
// Note: os::abort() might be called very early during initialization, or
@ -1222,8 +1241,7 @@ void os::die() {
// from src/solaris/hpi/src/system_md.c
size_t os::lasterror(char *buf, size_t len) {
if (errno == 0) return 0;
if (errno == 0) return 0;
const char *s = ::strerror(errno);
size_t n = ::strlen(s);
@ -1236,6 +1254,7 @@ size_t os::lasterror(char *buf, size_t len) {
}
intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {
// This implementation returns a unique pid, the pid of the
@ -1372,9 +1391,9 @@ bool os::dll_address_to_function_name(address addr, char *buf,
if (offset) {
*offset = -1;
}
if (buf) {
buf[0] = '\0';
}
// Buf is not optional, but offset is optional.
assert(buf != NULL, "sanity check");
buf[0] = '\0';
// Resolve function ptr literals first.
addr = resolve_function_descriptor_to_code_pointer(addr);
@ -1407,12 +1426,9 @@ static int getModuleName(codeptr_t pc, // [in] program counte
return 0;
}
if (Verbose) {
fprintf(stderr, "pc outside any module");
}
trcVerbose("pc outside any module");
return -1;
}
bool os::dll_address_to_library_name(address addr, char* buf,
@ -1420,9 +1436,9 @@ bool os::dll_address_to_library_name(address addr, char* buf,
if (offset) {
*offset = -1;
}
if (buf) {
buf[0] = '\0';
}
// Buf is not optional, but offset is optional.
assert(buf != NULL, "sanity check");
buf[0] = '\0';
// Resolve function ptr literals first.
addr = resolve_function_descriptor_to_code_pointer(addr);
@ -1437,7 +1453,7 @@ bool os::dll_address_to_library_name(address addr, char* buf,
}
// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
if (ebuf && ebuflen > 0) {
@ -1600,7 +1616,6 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
st->cr();
}
static void print_signal_handler(outputStream* st, int sig,
char* buf, size_t buflen);
@ -1624,7 +1639,7 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so or libjvm_g.so
// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
@ -1695,7 +1710,7 @@ void* os::signal(int signal_number, void* handler) {
// Do not block out synchronous signals in the signal handler.
// Blocking synchronous signals only makes sense if you can really
// be sure that those signals won't happen during signal handling,
// when the blocking applies. Normal signal handlers are lean and
// when the blocking applies. Normal signal handlers are lean and
// do not cause signals. But our signal handlers tend to be "risky"
// - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
// On AIX, PASE there was a case where a SIGSEGV happened, followed
@ -2861,13 +2876,9 @@ OSReturn os::set_native_priority(Thread* thread, int newpri) {
param.sched_priority = newpri;
int ret = pthread_setschedparam(thr, policy, &param);
if (Verbose) {
if (ret == 0) {
fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
} else {
fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
(int)thr, newpri, ret, strerror(ret));
}
if (ret != 0) {
trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
(int)thr, newpri, ret, strerror(ret));
}
return (ret == 0) ? OS_OK : OS_ERR;
}
@ -2988,7 +2999,6 @@ static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
errno = old_errno;
}
static int SR_initialize() {
struct sigaction act;
char *s;
@ -3187,7 +3197,6 @@ void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
JVM_handle_aix_signal(sig, info, uc, true);
}
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;
@ -3381,7 +3390,7 @@ void os::Aix::install_signal_handlers() {
set_signal_handler(SIGDANGER, true);
if (libjsig_is_loaded) {
// Tell libjsig jvm finishes setting signal handlers
// Tell libjsig jvm finishes setting signal handlers.
(*end_signal_setting)();
}
@ -3397,7 +3406,7 @@ void os::Aix::install_signal_handlers() {
tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
check_signals = false;
}
// need to initialize check_signal_done
// Need to initialize check_signal_done.
::sigemptyset(&check_signal_done);
}
}
@ -3471,7 +3480,6 @@ static void print_signal_handler(outputStream* st, int sig,
st->cr();
}
#define DO_SIGNAL_CHECK(sig) \
if (!sigismember(&check_signal_done, sig)) \
os::Aix::check_signal_handler(sig)
@ -3532,7 +3540,6 @@ void os::Aix::check_signal_handler(int sig) {
? CAST_FROM_FN_PTR(address, act.sa_sigaction)
: CAST_FROM_FN_PTR(address, act.sa_handler);
switch(sig) {
case SIGSEGV:
case SIGBUS:
@ -3685,15 +3692,13 @@ void os::init(void) {
pthread_mutex_init(&dl_mutex, NULL);
}
// this is called _after_ the global arguments have been parsed
// This is called _after_ the global arguments have been parsed.
jint os::init_2(void) {
if (Verbose) {
fprintf(stderr, "processor count: %d\n", os::_processor_count);
fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
}
trcVerbose("processor count: %d", os::_processor_count);
trcVerbose("physical memory: %lu", Aix::_physical_memory);
// initially build up the loaded dll map
// Initially build up the loaded dll map.
LoadedLibraries::reload();
const int page_size = Aix::page_size();
@ -3743,7 +3748,7 @@ jint os::init_2(void) {
}
if (map_address != (address) MAP_FAILED) {
// map succeeded, but polling_page is not at wished address, unmap and continue.
// Map succeeded, but polling_page is not at wished address, unmap and continue.
::munmap(map_address, map_size);
map_address = (address) MAP_FAILED;
}
@ -3797,7 +3802,7 @@ jint os::init_2(void) {
// Make the stack size a multiple of the page size so that
// the yellow/red zones can be guarded.
// note that this can be 0, if no default stacksize was set
// Note that this can be 0, if no default stacksize was set.
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
Aix::libpthread_init();
@ -4088,7 +4093,6 @@ int os::open(const char *path, int oflag, int mode) {
return fd;
}
// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;
@ -4169,7 +4173,6 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
return mapped_address;
}
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
@ -4217,14 +4220,14 @@ static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong*
jlong sys_time = 0;
jlong user_time = 0;
// reimplemented using getthrds64().
// Reimplemented using getthrds64().
//
// goes like this:
// Works like this:
// For the thread in question, get the kernel thread id. Then get the
// kernel thread statistics using that id.
//
// This only works of course when no pthread scheduling is used,
// ie there is a 1:1 relationship to kernel threads.
// i.e. there is a 1:1 relationship to kernel threads.
// On AIX, see AIXTHREAD_SCOPE variable.
pthread_t pthtid = thread->osthread()->pthread_id();
@ -4371,14 +4374,12 @@ void os::Aix::initialize_os_info() {
memset(&uts, 0, sizeof(uts));
strcpy(uts.sysname, "?");
if (::uname(&uts) == -1) {
fprintf(stderr, "uname failed (%d)\n", errno);
trc("uname failed (%d)", errno);
guarantee(0, "Could not determine whether we run on AIX or PASE");
} else {
if (Verbose) {
fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
"node \"%s\" machine \"%s\"\n",
uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
}
trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
"node \"%s\" machine \"%s\"\n",
uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
const int major = atoi(uts.version);
assert(major > 0, "invalid OS version");
const int minor = atoi(uts.release);
@ -4390,12 +4391,10 @@ void os::Aix::initialize_os_info() {
// We run on AIX. We do not support versions older than AIX 5.3.
_on_pase = 0;
if (_os_version < 0x0503) {
fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
trc("AIX release older than AIX 5.3 not supported.");
assert(false, "AIX release too old.");
} else {
if (Verbose) {
fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
}
trcVerbose("We run on AIX %d.%d\n", major, minor);
}
} else {
assert(false, "unknown OS");
@ -4403,7 +4402,6 @@ void os::Aix::initialize_os_info() {
}
guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
} // end: os::Aix::initialize_os_info()
// Scan environment for important settings which might affect the VM.
@ -4441,12 +4439,10 @@ void os::Aix::scan_environment() {
// Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
// exec() ? before loading the libjvm ? ....)
p = ::getenv("XPG_SUS_ENV");
if (Verbose) {
fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
}
trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
if (p && strcmp(p, "ON") == 0) {
_xpg_sus_mode = 1;
fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
trc("Unsupported setting: XPG_SUS_ENV=ON");
// This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
// clobber address ranges. If we ever want to support that, we have to do some
// testing first.
@ -4458,10 +4454,7 @@ void os::Aix::scan_environment() {
// Switch off AIX internal (pthread) guard pages. This has
// immediate effect for any pthread_create calls which follow.
p = ::getenv("AIXTHREAD_GUARDPAGES");
if (Verbose) {
fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
}
trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
guarantee(rc == 0, "");
@ -4479,7 +4472,7 @@ void os::Aix::initialize_libperfstat() {
assert(os::Aix::on_aix(), "AIX only");
if (!libperfstat::init()) {
fprintf(stderr, "libperfstat initialization failed.\n");
trc("libperfstat initialization failed.");
assert(false, "libperfstat initialization failed");
} else {
if (Verbose) {
@ -4651,7 +4644,6 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
return abstime;
}
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).
@ -4732,7 +4724,7 @@ int os::PlatformEvent::park(jlong millis) {
while (_Event < 0) {
status = pthread_cond_timedwait(_cond, _mutex, &abst);
assert_status(status == 0 || status == ETIMEDOUT,
status, "cond_timedwait");
status, "cond_timedwait");
if (!FilterSpuriousWakeups) break; // previous semantics
if (status == ETIMEDOUT) break;
// We consume and ignore EINTR and spurious wakeups.
@ -4866,9 +4858,9 @@ void Parker::park(bool isAbsolute, jlong time) {
// Optional fast-path check:
// Return immediately if a permit is available.
if (_counter > 0) {
_counter = 0;
OrderAccess::fence();
return;
_counter = 0;
OrderAccess::fence();
return;
}
Thread* thread = Thread::current();
@ -4890,7 +4882,6 @@ void Parker::park(bool isAbsolute, jlong time) {
unpackTime(&absTime, isAbsolute, time);
}
// Enter safepoint region
// Beware of deadlocks such as 6317397.
// The per-thread Parker:: mutex is a classic leaf-lock.
@ -4978,7 +4969,6 @@ void Parker::unpark() {
}
}
extern char** environ;
// Run the specified command in a separate process. Return its exit value,
@ -4997,44 +4987,43 @@ int os::fork_and_exec(char* cmd) {
} else if (pid == 0) {
// child process
// try to be consistent with system(), which uses "/usr/bin/sh" on AIX
// Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
execve("/usr/bin/sh", argv, environ);
// execve failed
_exit(-1);
} else {
} else {
// copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
// care about the actual exit code, for now.
int status;
// Wait for the child process to exit. This returns immediately if
// Wait for the child process to exit. This returns immediately if
// the child has already exited.
while (waitpid(pid, &status, 0) < 0) {
switch (errno) {
switch (errno) {
case ECHILD: return 0;
case EINTR: break;
default: return -1;
}
}
}
if (WIFEXITED(status)) {
// The child exited normally; get its exit code.
return WEXITSTATUS(status);
// The child exited normally; get its exit code.
return WEXITSTATUS(status);
} else if (WIFSIGNALED(status)) {
// The child exited because of a signal
// The best value to return is 0x80 + signal number,
// because that is what all Unix shells do, and because
// it allows callers to distinguish between process exit and
// process death by signal.
return 0x80 + WTERMSIG(status);
// The child exited because of a signal.
// The best value to return is 0x80 + signal number,
// because that is what all Unix shells do, and because
// it allows callers to distinguish between process exit and
// process death by signal.
return 0x80 + WTERMSIG(status);
} else {
// Unknown exit code; pass it through
return status;
// Unknown exit code; pass it through.
return status;
}
}
// Remove warning.
return -1;
}
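For orientation, the status decoding in fork_and_exec follows the common shell convention. A standalone restatement (the helper name is illustrative):

#include <sys/wait.h>

static int decode_wait_status(int status) {
  if (WIFEXITED(status)) {
    return WEXITSTATUS(status);      // normal exit: pass the exit code through
  } else if (WIFSIGNALED(status)) {
    return 0x80 + WTERMSIG(status);  // killed by signal N: report 128 + N,
  }                                  // distinguishable from any normal exit code
  return status;                     // unknown encoding: pass it through
}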
@ -5049,7 +5038,7 @@ bool os::is_headless_jre() {
struct stat statbuf;
char buf[MAXPATHLEN];
char libmawtpath[MAXPATHLEN];
const char *xawtstr = "/xawt/libmawt.so";
const char *xawtstr = "/xawt/libmawt.so";
const char *new_xawtstr = "/libawt_xawt.so";
char *p;
@ -5090,6 +5079,9 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return 0;
}
jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
p, current_process_id());
return strlen(buffer);
}

View File

@ -209,7 +209,7 @@ class Aix {
return _can_use_16M_pages == 1 ? true : false;
}
static address ucontext_get_pc(ucontext_t* uc);
static address ucontext_get_pc(const ucontext_t* uc);
static intptr_t* ucontext_get_sp(ucontext_t* uc);
static intptr_t* ucontext_get_fp(ucontext_t* uc);
// Set PC into context. Needed for continuation after signal.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,7 +43,8 @@ void OSThread::pd_initialize() {
sigemptyset(&_caller_sigmask);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
assert(_startThread_lock != NULL, "check");
}

View File

@ -4673,7 +4673,7 @@ bool os::is_headless_jre() {
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
int n = jio_snprintf(buffer, bufferSize, "/cores");
int n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", current_process_id());
// Truncate if theoretical string was longer than bufferSize
n = MIN2(n, (int)bufferSize);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,8 @@ void OSThread::pd_initialize() {
sigemptyset(&_caller_sigmask);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
_startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
Monitor::_safepoint_check_never);
assert(_startThread_lock != NULL, "check");
}

View File

@ -5988,13 +5988,70 @@ bool os::is_headless_jre() {
// Get the default path to the core file
// Returns the length of the string
int os::get_core_path(char* buffer, size_t bufferSize) {
const char* p = get_current_directory(buffer, bufferSize);
/*
* Max length of /proc/sys/kernel/core_pattern is 128 characters.
* See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
*/
const int core_pattern_len = 129;
char core_pattern[core_pattern_len] = {0};
if (p == NULL) {
assert(p != NULL, "failed to get current directory");
int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
if (core_pattern_file != -1) {
ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
::close(core_pattern_file);
if (ret > 0) {
char *last_char = core_pattern + strlen(core_pattern) - 1;
if (*last_char == '\n') {
*last_char = '\0';
}
}
}
if (strlen(core_pattern) == 0) {
return 0;
}
char *pid_pos = strstr(core_pattern, "%p");
size_t written;
if (core_pattern[0] == '/') {
written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
} else {
char cwd[PATH_MAX];
const char* p = get_current_directory(cwd, PATH_MAX);
if (p == NULL) {
assert(p != NULL, "failed to get current directory");
return 0;
}
if (core_pattern[0] == '|') {
written = jio_snprintf(buffer, bufferSize,
"\"%s\" (or dumping to %s/core.%d)",
&core_pattern[1], p, current_process_id());
} else {
written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
}
}
if ((written >= 0) && (written < bufferSize)
&& (pid_pos == NULL) && (core_pattern[0] != '|')) {
int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);
if (core_uses_pid_file != -1) {
char core_uses_pid = 0;
ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
::close(core_uses_pid_file);
if (core_uses_pid == '1') {
jio_snprintf(buffer + written, bufferSize - written,
".%d", current_process_id());
}
}
}
return strlen(buffer);
}
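The new Linux get_core_path derives the core location from /proc/sys/kernel/core_pattern. A minimal standalone sketch of the read-and-trim step it performs first (illustrative helper, reduced error handling):

#include <fcntl.h>
#include <unistd.h>

static bool read_core_pattern(char* buf, size_t buflen) {
  int fd = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
  if (fd == -1) return false;
  ssize_t n = ::read(fd, buf, buflen - 1);
  ::close(fd);
  if (n <= 0) return false;
  buf[n] = '\0';
  if (buf[n - 1] == '\n') buf[n - 1] = '\0';  // the kernel appends a newline
  return buf[0] != '\0';
}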

View File

@ -51,15 +51,24 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
struct rlimit rlim;
bool success;
n = get_core_path(buffer, bufferSize);
char core_path[PATH_MAX];
n = get_core_path(core_path, PATH_MAX);
if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (may not exist)", current_process_id());
if (n <= 0) {
jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
success = true;
#ifdef LINUX
} else if (core_path[0] == '"') { // redirect to user process
jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
success = true;
#endif
} else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
success = true;
} else {
switch(rlim.rlim_cur) {
case RLIM_INFINITY:
jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d", current_process_id());
jio_snprintf(buffer, bufferSize, "%s", core_path);
success = true;
break;
case 0:
@ -67,11 +76,12 @@ void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char*
success = false;
break;
default:
jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", current_process_id(), (unsigned long)(rlim.rlim_cur >> 10));
jio_snprintf(buffer, bufferSize, "%s (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, (unsigned long)(rlim.rlim_cur >> 10));
success = true;
break;
}
}
VMError::report_coredump_status(buffer, success);
}
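The reworked check_or_create_dump keys its message off RLIMIT_CORE. A minimal sketch of just that query (illustrative wrapper, without HotSpot's reporting around it):

#include <sys/resource.h>

static bool core_dumps_enabled(unsigned long* max_kb) {
  struct rlimit rlim;
  if (getrlimit(RLIMIT_CORE, &rlim) != 0) return false;  // limit unknown
  if (rlim.rlim_cur == 0) return false;                  // core dumps disabled
  *max_kb = (rlim.rlim_cur == RLIM_INFINITY)
                ? 0                                      // 0 means unlimited here
                : (unsigned long)(rlim.rlim_cur >> 10);  // bytes to kB
  return true;
}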
@ -89,8 +99,8 @@ int os::get_native_stack(address* stack, int frames, int toSkip) {
} else {
stack[frame_idx ++] = fr.pc();
}
if (fr.fp() == NULL || os::is_first_C_frame(&fr)
||fr.sender_pc() == NULL || fr.cb() != NULL) break;
if (fr.fp() == NULL || fr.cb() != NULL ||
fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;
if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
fr = os::get_sender_for_C_frame(&fr);

View File

@ -5979,6 +5979,9 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return 0;
}
jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
p, current_process_id());
return strlen(buffer);
}

View File

@ -3074,7 +3074,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment) {
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
assert(bytes % os::vm_page_size() == 0, "reserve page size");
char* res;
// note that if UseLargePages is on, all the areas that require interleaving
// will go thru reserve_memory_special rather than thru here.
@ -3768,7 +3768,6 @@ HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
return NULL;
}
#define MAX_EXIT_HANDLES PRODUCT_ONLY(32) NOT_PRODUCT(128)
#define EXIT_TIMEOUT PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
@ -3787,7 +3786,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
// _endthreadex().
// Should be large enough to avoid blocking the exiting thread due to lack of
// a free slot.
static HANDLE handles[MAX_EXIT_HANDLES];
static HANDLE handles[MAXIMUM_WAIT_OBJECTS];
static int handle_count = 0;
static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
@ -3809,32 +3808,34 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
if (res == WAIT_TIMEOUT) {
handles[j++] = handles[i];
} else {
if (res != WAIT_OBJECT_0) {
warning("WaitForSingleObject failed in %s: %d\n", __FILE__, __LINE__);
// Don't keep the handle, if we failed waiting for it.
if (res == WAIT_FAILED) {
warning("WaitForSingleObject failed (%u) in %s: %d\n",
GetLastError(), __FILE__, __LINE__);
}
// Don't keep the handle, if we failed waiting for it.
CloseHandle(handles[i]);
}
}
// If there's no free slot in the array of the kept handles, we'll have to
// wait until at least one thread completes exiting.
if ((handle_count = j) == MAX_EXIT_HANDLES) {
if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) {
// Raise the priority of the oldest exiting thread to increase its chances
// to complete sooner.
SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
res = WaitForMultipleObjects(MAX_EXIT_HANDLES, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
i = (res - WAIT_OBJECT_0);
handle_count = MAX_EXIT_HANDLES - 1;
handle_count = MAXIMUM_WAIT_OBJECTS - 1;
for (; i < handle_count; ++i) {
handles[i] = handles[i + 1];
}
} else {
warning("WaitForMultipleObjects %s in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"),
GetLastError(), __FILE__, __LINE__);
// Don't keep handles, if we failed waiting for them.
for (i = 0; i < MAX_EXIT_HANDLES; ++i) {
for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) {
CloseHandle(handles[i]);
}
handle_count = 0;
@ -3846,7 +3847,8 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
hthr = GetCurrentThread();
if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
0, FALSE, DUPLICATE_SAME_ACCESS)) {
warning("DuplicateHandle failed in %s: %d\n", __FILE__, __LINE__);
warning("DuplicateHandle failed (%u) in %s: %d\n",
GetLastError(), __FILE__, __LINE__);
} else {
++handle_count;
}
@ -3869,9 +3871,10 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL);
}
res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
if (res < WAIT_OBJECT_0 || res >= (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
warning("WaitForMultipleObjects %s in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
(res == WAIT_FAILED ? "failed" : "timed out"),
GetLastError(), __FILE__, __LINE__);
}
for (i = 0; i < handle_count; ++i) {
CloseHandle(handles[i]);
@ -3909,7 +3912,6 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
return exit_code;
}
#undef MAX_EXIT_HANDLES
#undef EXIT_TIMEOUT
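The change above drops the private MAX_EXIT_HANDLES cap in favor of the Win32 limit MAXIMUM_WAIT_OBJECTS (64), the most handles WaitForMultipleObjects accepts in one call. A minimal sketch of a capped wait (illustrative wrapper, not the HotSpot code):

#include <windows.h>

static DWORD wait_for_all_capped(HANDLE* handles, DWORD count, DWORD timeout_ms) {
  // Beyond MAXIMUM_WAIT_OBJECTS the call fails with ERROR_INVALID_PARAMETER.
  if (count > MAXIMUM_WAIT_OBJECTS) {
    count = MAXIMUM_WAIT_OBJECTS;
  }
  return WaitForMultipleObjects(count, handles, TRUE /* wait for all */, timeout_ms);
}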
void os::win32::setmode_streams() {

View File

@ -91,8 +91,9 @@ void os::initialize_thread(Thread *thread) { }
// Frame information (pc, sp, fp) retrieved via ucontext
// always looks like a C-frame according to the frame
// conventions in frame_ppc64.hpp.
address os::Aix::ucontext_get_pc(ucontext_t * uc) {
// conventions in frame_ppc.hpp.
address os::Aix::ucontext_get_pc(const ucontext_t * uc) {
return (address)uc->uc_mcontext.jmp_context.iar;
}
@ -486,7 +487,7 @@ void os::Aix::init_thread_fpu_state(void) {
////////////////////////////////////////////////////////////////////////////////
// thread stack
size_t os::Aix::min_stack_allowed = 768*K;
size_t os::Aix::min_stack_allowed = 128*K;
// Aix is always in floating stack mode. The stack size for a new
// thread can be set via pthread_attr_setstacksize().
@ -499,7 +500,7 @@ size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
// because of the strange 'fallback logic' in os::create_thread().
// Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
// specify a different stack size for compiler threads!
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
return s;
}

View File

@ -23,8 +23,8 @@
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
static void setup_fpu() {}
@ -32,4 +32,4 @@
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP

View File

@ -23,8 +23,8 @@
*
*/
#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
#include "runtime/prefetch.hpp"
@ -55,4 +55,4 @@ inline void Prefetch::write(void *loc, intx interval) {
#endif
}
#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP

View File

@ -23,8 +23,8 @@
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
// Processor dependent parts of ThreadLocalStorage
@ -33,4 +33,4 @@ public:
return (Thread *) os::thread_local_storage_at(thread_index());
}
#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP

View File

@ -23,8 +23,8 @@
*
*/
#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
private:
void pd_initialize() {
@ -76,4 +76,4 @@
intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP

View File

@ -453,7 +453,7 @@ void os::Linux::set_fpu_control_word(int fpu_control) {
////////////////////////////////////////////////////////////////////////////////
// thread stack
size_t os::Linux::min_stack_allowed = 768*K;
size_t os::Linux::min_stack_allowed = 128*K;
bool os::Linux::supports_variable_stack_size() { return true; }

View File

@ -3108,21 +3108,39 @@ void ClassFileParser::apply_parsed_class_attributes(instanceKlassHandle k) {
}
}
// Transfer ownership of metadata allocated to the InstanceKlass.
void ClassFileParser::apply_parsed_class_metadata(
instanceKlassHandle this_klass,
int java_fields_count, TRAPS) {
// Assign annotations if needed
if (_annotations != NULL || _type_annotations != NULL ||
_fields_annotations != NULL || _fields_type_annotations != NULL) {
// Create the Annotations object that will
// hold the annotations array for the Klass.
void ClassFileParser::create_combined_annotations(TRAPS) {
if (_annotations == NULL &&
_type_annotations == NULL &&
_fields_annotations == NULL &&
_fields_type_annotations == NULL) {
// Don't create the Annotations object unnecessarily.
return;
}
Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
annotations->set_class_annotations(_annotations);
annotations->set_class_type_annotations(_type_annotations);
annotations->set_fields_annotations(_fields_annotations);
annotations->set_fields_type_annotations(_fields_type_annotations);
this_klass->set_annotations(annotations);
}
// This is the Annotations object that will be
// assigned to InstanceKlass being constructed.
_combined_annotations = annotations;
// The annotation arrays below have been transferred to
// _combined_annotations, so these fields can now be cleared.
_annotations = NULL;
_type_annotations = NULL;
_fields_annotations = NULL;
_fields_type_annotations = NULL;
}
// Transfer ownership of metadata allocated to the InstanceKlass.
void ClassFileParser::apply_parsed_class_metadata(
instanceKlassHandle this_klass,
int java_fields_count, TRAPS) {
_cp->set_pool_holder(this_klass());
this_klass->set_constants(_cp);
this_klass->set_fields(_fields, java_fields_count);
@ -3130,6 +3148,7 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_inner_classes(_inner_classes);
this_klass->set_local_interfaces(_local_interfaces);
this_klass->set_transitive_interfaces(_transitive_interfaces);
this_klass->set_annotations(_combined_annotations);
// Clear out these fields so they don't get deallocated by the destructor
clear_class_metadata();
@ -4002,6 +4021,10 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
ClassAnnotationCollector parsed_annotations;
parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
// Finalize the Annotations metadata object,
// now that all annotation arrays have been created.
create_combined_annotations(CHECK_(nullHandle));
// Make sure this is the end of class file stream
guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
@ -4302,10 +4325,27 @@ ClassFileParser::~ClassFileParser() {
InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
_local_interfaces, _transitive_interfaces);
MetadataFactory::free_array<u1>(_loader_data, _annotations);
MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
Annotations::free_contents(_loader_data, _fields_annotations);
Annotations::free_contents(_loader_data, _fields_type_annotations);
if (_combined_annotations != NULL) {
// After all annotations arrays have been created, they are installed into the
// Annotations object that will be assigned to the InstanceKlass being created.
// Deallocate the Annotations object and the installed annotations arrays.
_combined_annotations->deallocate_contents(_loader_data);
// If the _combined_annotations pointer is non-NULL,
// then the other annotations fields should have been cleared.
assert(_annotations == NULL, "Should have been cleared");
assert(_type_annotations == NULL, "Should have been cleared");
assert(_fields_annotations == NULL, "Should have been cleared");
assert(_fields_type_annotations == NULL, "Should have been cleared");
} else {
// If the annotations arrays were not installed into the Annotations object,
// then they have to be deallocated explicitly.
MetadataFactory::free_array<u1>(_loader_data, _annotations);
MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
Annotations::free_contents(_loader_data, _fields_annotations);
Annotations::free_contents(_loader_data, _fields_type_annotations);
}
clear_class_metadata();
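The parser change above is a standard ownership-transfer idiom: once the raw annotation arrays are installed in the combined object, the source fields are nulled so exactly one owner remains for the destructor to free. A toy restatement of the idiom (all types and names illustrative):

struct Holder {                // stands in for Annotations
  int* _payload;
  Holder() : _payload(0) {}
};

struct Parser {                // stands in for ClassFileParser
  int*    _raw;                // allocated during parsing
  Holder* _combined;           // owns _raw after the transfer
  Parser() : _raw(0), _combined(0) {}
  void combine() {
    if (_raw == 0) return;     // nothing to transfer
    _combined = new Holder();
    _combined->_payload = _raw;
    _raw = 0;                  // clear the field: _combined is now the owner
  }
  ~Parser() {
    if (_combined != 0) {
      delete [] _combined->_payload;  // transferred: free via the single owner
      delete _combined;
    } else {
      delete [] _raw;          // never transferred: free the raw array directly
    }
  }
};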

View File

@ -75,6 +75,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
Array<u2>* _inner_classes;
Array<Klass*>* _local_interfaces;
Array<Klass*>* _transitive_interfaces;
Annotations* _combined_annotations;
AnnotationArray* _annotations;
AnnotationArray* _type_annotations;
Array<AnnotationArray*>* _fields_annotations;
@ -86,6 +87,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
void create_combined_annotations(TRAPS);
void init_parsed_class_attributes(ClassLoaderData* loader_data) {
_loader_data = loader_data;
_synthetic_flag = false;
@ -110,6 +113,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
_inner_classes = NULL;
_local_interfaces = NULL;
_transitive_interfaces = NULL;
_combined_annotations = NULL;
_annotations = _type_annotations = NULL;
_fields_annotations = _fields_type_annotations = NULL;
}

View File

@ -81,7 +81,8 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Depen
_metaspace(NULL), _unloading(false), _klasses(NULL),
_claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
_next(NULL), _dependencies(dependencies),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)) {
// empty
}

View File

@ -83,9 +83,11 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// Note: this requires that CFLspace c'tors
// are called serially in the order in which the locks are
// acquired in the program text. This is true today.
_freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
_freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
Monitor::_safepoint_check_sometimes),
_parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
"CompactibleFreeListSpace._dict_par_lock", true),
"CompactibleFreeListSpace._dict_par_lock", true,
Monitor::_safepoint_check_never),
_rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
CMSRescanMultiple),
_marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
@ -152,8 +154,7 @@ CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
// Initialize locks for parallel case.
for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
_indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
"a freelist par lock",
true);
"a freelist par lock", true, Mutex::_safepoint_check_sometimes);
DEBUG_ONLY(
_indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
)
@ -2559,12 +2560,12 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
x }
// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
// OldPLABSize, whose static default is different; if overridden at the
// Initialize with default setting for CMS, _not_
// generic OldPLABSize, whose static default is different; if overridden at the
// command-line, this will get reinitialized via a call to
// modify_initialization() below.
AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
uint CFLS_LAB::_global_num_workers[] = VECTOR_257(0);

View File

@ -690,6 +690,9 @@ class CFLS_LAB : public CHeapObj<mtGC> {
void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
public:
static const int _default_dynamic_old_plab_size = 16;
static const int _default_static_old_plab_size = 50;
CFLS_LAB(CompactibleFreeListSpace* cfls);
// Allocate and return a block of the given size, or else return NULL.

View File

@ -42,6 +42,7 @@
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/cardGeneration.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/gcLocker.inline.hpp"
@ -479,7 +480,9 @@ CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
_restart_addr(NULL),
_overflow_list(NULL),
_stats(cmsGen),
_eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
_eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
// Verify that this lock should be acquired with a safepoint check.
Monitor::_safepoint_check_sometimes)),
_eden_chunk_array(NULL), // may be set in ctor body
_eden_chunk_capacity(0), // -- ditto --
_eden_chunk_index(0), // -- ditto --
@ -793,11 +796,6 @@ void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
}
}
CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
return _cmsSpace;
}
void ConcurrentMarkSweepGeneration::reset_after_compaction() {
// Clear the promotion information. These pointers can be adjusted
// along with all the other pointers into the heap but
@ -808,10 +806,6 @@ void ConcurrentMarkSweepGeneration::reset_after_compaction() {
}
}
void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
blk->do_space(_cmsSpace);
}
void ConcurrentMarkSweepGeneration::compute_new_size() {
assert_locked_or_safepoint(Heap_lock);
@ -882,7 +876,7 @@ void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
expand_bytes);
}
// safe if expansion fails
expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr(" Expanded free fraction %f",
((double) free()) / capacity());
@ -1048,8 +1042,7 @@ oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
if (res == NULL) {
// expand and retry
size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
expand(s*HeapWordSize, MinHeapDeltaBytes,
CMSExpansionCause::_satisfy_promotion);
expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
// Since there's currently no next generation, we don't try to promote
// into a more senior generation.
assert(next_gen() == NULL, "assumption, based upon which no attempt "
@ -1618,14 +1611,15 @@ void CMSCollector::acquire_control_and_collect(bool full,
// If the collection is being acquired from the background
// collector, there may be references on the discovered
// references lists that have NULL referents (being those
// that were concurrently cleared by a mutator) or
// that are no longer active (having been enqueued concurrently
// by the mutator).
// Scrub the list of those references because Mark-Sweep-Compact
// code assumes referents are not NULL and that all discovered
// Reference objects are active.
ref_processor()->clean_up_discovered_references();
// references lists. Abandon those references, since some
// of them may have become unreachable after concurrent
// discovery; the STW compacting collector will redo discovery
// more precisely, without being subject to floating garbage.
// Leaving otherwise unreachable references in the discovered
// lists would require special handling.
ref_processor()->disable_discovery();
ref_processor()->abandon_partial_discovery();
ref_processor()->verify_no_references_recorded();
if (first_state > Idling) {
save_heap_summary();
@ -1691,7 +1685,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_soft_refs);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
@ -2624,13 +2618,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
void
ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
cl->set_generation(this);
younger_refs_in_space_iterate(_cmsSpace, cl);
cl->reset_generation();
}
void
ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
if (freelistLock()->owned_by_self()) {
@ -2803,23 +2790,17 @@ ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
CMSSynchronousYieldRequest yr;
assert(!tlab, "Can't deal with TLAB allocation");
MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
expand(word_size*HeapWordSize, MinHeapDeltaBytes,
CMSExpansionCause::_satisfy_allocation);
expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
if (GCExpandToAllocateDelayMillis > 0) {
os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
}
return have_lock_and_allocate(word_size, tlab);
}
// YSR: All of this generation expansion/shrinking stuff is an exact copy of
// TenuredGeneration, which makes me wonder if we should move this
// to CardGeneration and share it...
bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
return CardGeneration::expand(bytes, expand_bytes);
}
void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
CMSExpansionCause::Cause cause)
void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
size_t bytes,
size_t expand_bytes,
CMSExpansionCause::Cause cause)
{
bool success = expand(bytes, expand_bytes);
@ -2848,8 +2829,7 @@ HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThr
return NULL;
}
// Otherwise, we try expansion.
expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
CMSExpansionCause::_allocate_par_lab);
expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
// Now go around the loop and try alloc again;
// A competing par_promote might beat us to the expansion space,
// so we may go around the loop again if promotion fails again.
@ -2876,8 +2856,7 @@ bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
return false;
}
// Otherwise, we try expansion.
expand(refill_size_bytes, MinHeapDeltaBytes,
CMSExpansionCause::_allocate_par_spooling_space);
expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
// Now go around the loop and try alloc again;
// A competing allocation might beat us to the expansion space,
// so we may go around the loop again if allocation fails again.
@ -2887,77 +2866,16 @@ bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
}
}
void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
assert_locked_or_safepoint(ExpandHeap_lock);
// Shrink committed space
_virtual_space.shrink_by(bytes);
// Shrink space; this also shrinks the space's BOT
_cmsSpace->set_end((HeapWord*) _virtual_space.high());
size_t new_word_size = heap_word_size(_cmsSpace->capacity());
// Shrink the shared block offset array
_bts->resize(new_word_size);
MemRegion mr(_cmsSpace->bottom(), new_word_size);
// Shrink the card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + bytes;
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, new_mem_size/K);
}
}
void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
assert_locked_or_safepoint(Heap_lock);
size_t size = ReservedSpace::page_align_size_down(bytes);
// Only shrink if a compaction was done so that all the free space
// in the generation is in a contiguous block at the end.
if (size > 0 && did_compact()) {
shrink_by(size);
if (did_compact()) {
CardGeneration::shrink(bytes);
}
}
bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
assert_locked_or_safepoint(Heap_lock);
bool result = _virtual_space.expand_by(bytes);
if (result) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
MemRegion mr(_cmsSpace->bottom(), new_word_size);
_bts->resize(new_word_size); // resize the block offset shared array
Universe::heap()->barrier_set()->resize_covered_region(mr);
// Hmmmm... why doesn't CFLS::set_end verify locking?
// This is quite ugly; FIX ME XXX
_cmsSpace->assert_locked(freelistLock());
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
if (UsePerfData) {
_space_counters->update_capacity();
_gen_counters->update_all();
}
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size - bytes;
gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}
return result;
}
bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
assert_locked_or_safepoint(Heap_lock);
bool success = true;
const size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
}
return success;
}
void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
@ -3084,7 +3002,7 @@ void CMSCollector::checkpointRootsInitial() {
Mutex::_no_safepoint_check_flag);
checkpointRootsInitialWork();
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
rp->enable_discovery();
_collectorState = Marking;
}
SpecializationStats::print();
@ -6031,7 +5949,8 @@ HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
_bm(),
_shifter(shifter),
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
_lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
Monitor::_safepoint_check_sometimes) : NULL)
{
_bmStartWord = 0;
_bmWordSize = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,9 +30,10 @@
#include "gc_implementation/shared/gcStats.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/cardGeneration.hpp"
#include "memory/freeBlockDictionary.hpp"
#include "memory/generation.hpp"
#include "memory/iterator.hpp"
#include "memory/space.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memoryService.hpp"
@ -171,9 +172,7 @@ class CMSBitMap VALUE_OBJ_CLASS_SPEC {
// Represents a marking stack used by the CMS collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMSMarkStack: public CHeapObj<mtGC> {
//
friend class CMSCollector; // To get at expansion stats further below.
//
VirtualSpace _virtual_space; // Space for the stack
oop* _base; // Bottom of stack
@ -188,7 +187,8 @@ class CMSMarkStack: public CHeapObj<mtGC> {
public:
CMSMarkStack():
_par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
_par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
Monitor::_safepoint_check_never),
_hit_limit(0),
_failed_double(0) {}
@ -1031,6 +1031,9 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
// Accessing spaces
CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }
private:
// For parallel young-gen GC support.
CMSParGCThreadState** _par_gc_thread_states;
@ -1064,6 +1067,10 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
double initiating_occupancy() const { return _initiating_occupancy; }
void init_initiating_occupancy(intx io, uintx tr);
void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
void assert_correct_size_change_locking();
public:
ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
int level, CardTableRS* ct,
@ -1100,23 +1107,14 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
// Override
virtual void ref_processor_init();
// Grow generation by specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();
void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
// Space enquiries
size_t capacity() const;
size_t used() const;
size_t free() const;
double occupancy() const { return ((double)used())/((double)capacity()); }
size_t contiguous_available() const;
size_t unsafe_max_alloc_nogc() const;
// over-rides
MemRegion used_region() const;
MemRegion used_region_at_save_marks() const;
// Does a "full" (forced) collection invoked on this generation collect
@ -1127,10 +1125,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
return !ScavengeBeforeFullGC;
}
void space_iterate(SpaceClosure* blk, bool usedOnly = false);
// Support for compaction
CompactibleSpace* first_compaction_space() const;
// Adjust quantities in the generation affected by
// the compaction.
void reset_after_compaction();
@ -1190,18 +1184,13 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
}
// Allocation failure
void expand(size_t bytes, size_t expand_bytes,
CMSExpansionCause::Cause cause);
virtual bool expand(size_t bytes, size_t expand_bytes);
void shrink(size_t bytes);
void shrink_by(size_t bytes);
HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
bool expand_and_ensure_spooling_space(PromotionInfo* promo);
// Iteration support and related enquiries
void save_marks();
bool no_allocs_since_save_marks();
void younger_refs_iterate(OopsInGenClosure* cl);
// Iteration support specific to CMS generations
void save_sweep_limit();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -369,22 +369,6 @@ inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
cmsSpace()->save_sweep_limit();
}
inline size_t ConcurrentMarkSweepGeneration::capacity() const {
return _cmsSpace->capacity();
}
inline size_t ConcurrentMarkSweepGeneration::used() const {
return _cmsSpace->used();
}
inline size_t ConcurrentMarkSweepGeneration::free() const {
return _cmsSpace->free();
}
inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
return _cmsSpace->used_region();
}
inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
return _cmsSpace->used_region_at_save_marks();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,8 @@ ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *nex
// The 0th worker in notified by mutator threads and has a special monitor.
// The last worker is used for young gen rset size sampling.
if (worker_id > 0) {
_monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);
_monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
Monitor::_safepoint_check_never);
} else {
_monitor = DirtyCardQ_CBL_mon;
}

View File

@ -971,7 +971,7 @@ void ConcurrentMark::checkpointRootsInitialPost() {
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor_cm();
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
rp->enable_discovery();
rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

View File

@ -254,25 +254,23 @@ void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Young);
}
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForSurvived);
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Old);
}
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForTenured);
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}
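These hunks replace the GCAllocPurpose enum with InCSetState values. The real type is defined in g1InCSetState.hpp, which this diff only references; purely for orientation, a simplified wrapper of that kind might look like:

#include <stdint.h>

struct ToyInCSetState {                 // illustrative; not G1's definition
  enum { NotInCSet = 0, Young = 1, Old = 2, Num = 3 };
  int8_t _value;
  explicit ToyInCSetState(int8_t v) : _value(v) {}
  bool is_in_cset() const { return _value == Young || _value == Old; }
  bool is_young()   const { return _value == Young; }
  bool is_old()     const { return _value == Old; }
  bool is_valid()   const { return _value >= NotInCSet && _value < Num; }
};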
HeapRegion* OldGCAllocRegion::release() {

View File

@ -113,15 +113,16 @@ void G1DefaultAllocator::abandon_gc_alloc_regions() {
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
size_t gclab_word_size = _g1h->desired_plab_sz(dest);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
@ -129,30 +130,33 @@ HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
HeapWord* const obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
return obj;
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
return _g1h->par_allocate_during_gc(dest, word_sz, context);
}
return obj;
}
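For orientation: the guard at the top of allocate_direct_or_new_plab retires and refills the PLAB only when the requested size is below the acceptable-waste fraction of a PLAB; larger requests bypass the PLAB and go straight to the region, so remaining buffer space is not thrown away for one oversized object. A worked instance of the inequality (numbers are made up):

  word_sz = 40, gclab_word_size = 4096, ParallelGCBufferWastePct = 10
    40 * 100 = 4000   <  4096 * 10 = 40960  -> retire the PLAB, start a new one

  word_sz = 500, same PLAB parameters
    500 * 100 = 50000 >= 40960              -> allocate directly in the region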
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
for (uint state = 0; state < InCSetState::Num; state++) {
G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
if (buf != NULL) {
add_to_alloc_buffer_waste(buf->words_remaining());
buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
true /* end_of_gc */,
false /* retain */);
}
}
}

View File

@ -27,14 +27,9 @@
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
@ -178,20 +173,40 @@ class G1ParGCAllocator : public CHeapObj<mtGC> {
protected:
G1CollectedHeap* _g1h;
// The survivor alignment in effect in bytes.
// == 0 : don't align survivors
// != 0 : align survivors to that alignment
// These values were chosen to favor the non-alignment case since some
// architectures have a special compare against zero instructions.
const uint _survivor_alignment_bytes;
size_t _alloc_buffer_waste;
size_t _undo_waste;
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
virtual void retire_alloc_buffers() = 0;
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
// Calculate the survivor space object alignment in bytes. Returns that or 0 if
// there are no restrictions on survivor alignment.
static uint calc_survivor_alignment_bytes() {
assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
// No need to align objects in the survivors differently, return 0
// which means "survivor alignment is not used".
return 0;
} else {
assert(SurvivorAlignmentInBytes > 0, "sanity");
return SurvivorAlignmentInBytes;
}
}
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
_g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
_alloc_buffer_waste(0), _undo_waste(0) {
}
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
@ -199,24 +214,40 @@ public:
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() {return _undo_waste; }
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
if (purpose == GCAllocForSurvived) {
obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
// Allocate word_sz words in dest, either directly into the regions or by
// allocating a new PLAB. Returns the address of the allocated memory, NULL if
// not successful.
HeapWord* allocate_direct_or_new_plab(InCSetState dest,
size_t word_sz,
AllocationContext_t context);
// Allocate word_sz words in the PLAB of dest. Returns the address of the
// allocated memory, NULL if not successful.
HeapWord* plab_allocate(InCSetState dest,
size_t word_sz,
AllocationContext_t context) {
G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
if (_survivor_alignment_bytes == 0) {
return buffer->allocate(word_sz);
} else {
obj = alloc_buffer(purpose, context)->allocate(word_sz);
return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
}
}
HeapWord* allocate(InCSetState dest, size_t word_sz,
AllocationContext_t context) {
HeapWord* const obj = plab_allocate(dest, word_sz, context);
if (obj != NULL) {
return obj;
}
return allocate_slow(purpose, word_sz, context);
return allocate_direct_or_new_plab(dest, word_sz, context);
}
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
if (alloc_buffer(purpose, context)->contains(obj)) {
assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
if (alloc_buffer(dest, context)->contains(obj)) {
assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
@ -227,13 +258,17 @@ public:
class G1DefaultParGCAllocator : public G1ParGCAllocator {
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
return _alloc_buffers[purpose];
virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
assert(_alloc_buffers[dest.value()] != NULL,
err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
return _alloc_buffers[dest.value()];
}
virtual void retire_alloc_buffers();

View File

@ -352,7 +352,7 @@ void YoungList::print() {
}
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
OtherRegionsTable::invalidate(start_idx, num_regions);
HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
@ -1301,7 +1301,7 @@ bool G1CollectedHeap::do_collection(bool explicit_gc,
// Temporarily clear the STW ref processor's _is_alive_non_header field.
ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor_stw()->enable_discovery();
ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
// Do collection work
@ -1886,13 +1886,12 @@ jint G1CollectedHeap::initialize() {
initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
// Create the gen rem set (and barrier set) for the entire reserved region.
_rem_set = collector_policy()->create_rem_set(reserved_region());
set_barrier_set(rem_set()->bs());
if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
return JNI_ENOMEM;
}
// Create the barrier set for the entire reserved region.
G1SATBCardTableLoggingModRefBS* bs
= new G1SATBCardTableLoggingModRefBS(reserved_region());
bs->initialize();
assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
set_barrier_set(bs);
// Also create a G1 rem set.
_g1_rem_set = new G1RemSet(this, g1_barrier_set());
@ -3153,8 +3152,6 @@ void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
failures = true;
}
}
if (!silent) gclog_or_tty->print("RemSet ");
rem_set()->verify();
if (G1StringDedup::is_enabled()) {
if (!silent) gclog_or_tty->print("StrDedup ");
@ -3750,8 +3747,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// reference processing currently works in G1.
// Enable discovery in the STW reference processor
ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
true /*verify_no_refs*/);
ref_processor_stw()->enable_discovery();
{
// We want to temporarily turn off discovery by the
@ -3819,6 +3815,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
register_humongous_regions_with_in_cset_fast_test();
assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
@ -4048,29 +4046,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
return true;
}
size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
{
size_t gclab_word_size;
switch (purpose) {
case GCAllocForSurvived:
gclab_word_size = _survivor_plab_stats.desired_plab_sz();
break;
case GCAllocForTenured:
gclab_word_size = _old_plab_stats.desired_plab_sz();
break;
default:
assert(false, "unknown GCAllocPurpose");
gclab_word_size = _old_plab_stats.desired_plab_sz();
break;
}
// Prevent humongous PLAB sizes for two reasons:
// * PLABs are allocated using a similar paths as oops, but should
// never be in a humongous region
// * Allowing humongous PLABs needlessly churns the region free lists
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
_drain_in_progress = false;
set_evac_failure_closure(cl);
@ -4196,35 +4171,6 @@ void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
}
}
HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
size_t word_size,
AllocationContext_t context) {
if (purpose == GCAllocForSurvived) {
HeapWord* result = survivor_attempt_allocation(word_size, context);
if (result != NULL) {
return result;
} else {
// Let's try to allocate in the old gen in case we can fit the
// object there.
return old_attempt_allocation(word_size, context);
}
} else {
assert(purpose == GCAllocForTenured, "sanity");
HeapWord* result = old_attempt_allocation(word_size, context);
if (result != NULL) {
return result;
} else {
// Let's try to allocate in the survivors in case we can fit the
// object there.
return survivor_attempt_allocation(word_size, context);
}
}
ShouldNotReachHere();
// Trying to keep some compilers happy.
return NULL;
}
void G1ParCopyHelper::mark_object(oop obj) {
assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
@ -4267,15 +4213,14 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
if (state == G1CollectedHeap::InCSet) {
const InCSetState state = _g1->in_cset_state(obj);
if (state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
}
assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
@ -4289,7 +4234,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
do_klass_barrier(p, forwardee);
}
} else {
if (state == G1CollectedHeap::IsHumongous) {
if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
// The object is not in collection set. If we're a root scanning
@ -4609,7 +4554,7 @@ void
G1CollectedHeap::
g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
OopsInHeapRegionClosure* scan_rs,
G1ParPushHeapRSClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
@ -4933,7 +4878,7 @@ private:
}
};
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
class G1KlassCleaningTask : public StackObj {
BoolObjectClosure* _is_alive;
@ -5145,17 +5090,17 @@ public:
oop obj = *p;
assert(obj != NULL, "the caller should have filtered out NULL values");
G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
if (cset_state == G1CollectedHeap::InNeither) {
const InCSetState cset_state = _g1->in_cset_state(obj);
if (!cset_state.is_in_cset_or_humongous()) {
return;
}
if (cset_state == G1CollectedHeap::InCSet) {
if (cset_state.is_in_cset()) {
assert( obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
} else {
assert(!obj->is_forwarded(), "invariant" );
assert(cset_state == G1CollectedHeap::IsHumongous,
err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
assert(cset_state.is_humongous(),
err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
_g1->set_humongous_is_live(obj);
}
}
@ -5640,8 +5585,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
init_for_evac_failure(NULL);
rem_set()->prepare_for_younger_refs_iterate(true);
assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
double start_par_time_sec = os::elapsedTime();
double end_par_time_sec;
@ -5951,6 +5894,70 @@ void G1CollectedHeap::check_bitmaps(const char* caller) {
heap_region_iterate(&cl);
guarantee(!cl.failures(), "bitmap verification");
}
class G1CheckCSetFastTableClosure : public HeapRegionClosure {
private:
bool _failures;
public:
G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
virtual bool doHeapRegion(HeapRegion* hr) {
uint i = hr->hrm_index();
InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
if (hr->is_humongous()) {
if (hr->in_collection_set()) {
gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_continues_humongous() && cset_state.is_humongous()) {
gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
} else {
if (cset_state.is_humongous()) {
gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
_failures = true;
return true;
}
if (hr->in_collection_set() != cset_state.is_in_cset()) {
gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
hr->in_collection_set(), cset_state.value(), i);
_failures = true;
return true;
}
if (cset_state.is_in_cset()) {
if (hr->is_young() != (cset_state.is_young())) {
gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
hr->is_young(), cset_state.value(), i);
_failures = true;
return true;
}
if (hr->is_old() != (cset_state.is_old())) {
gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
hr->is_old(), cset_state.value(), i);
_failures = true;
return true;
}
}
}
return false;
}
bool failures() const { return _failures; }
};
bool G1CollectedHeap::check_cset_fast_test() {
G1CheckCSetFastTableClosure cl;
_hrm.iterate(&cl);
return !cl.failures();
}
#endif // PRODUCT
void G1CollectedHeap::cleanUpCardTable() {
@ -6519,20 +6526,20 @@ void G1CollectedHeap::set_par_threads() {
HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
uint count,
GCAllocPurpose ap) {
InCSetState dest) {
assert(FreeList_lock->owned_by_self(), "pre-condition");
if (count < g1_policy()->max_regions(ap)) {
bool survivor = (ap == GCAllocForSurvived);
if (count < g1_policy()->max_regions(dest)) {
const bool is_survivor = (dest.is_young());
HeapRegion* new_alloc_region = new_region(word_size,
!survivor,
!is_survivor,
true /* do_expand */);
if (new_alloc_region != NULL) {
// We really only need to do this for old regions given that we
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->record_timestamp();
if (survivor) {
if (is_survivor) {
new_alloc_region->set_survivor();
_hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
check_bitmaps("Survivor Region Allocation", new_alloc_region);
@ -6544,8 +6551,6 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
bool during_im = g1_policy()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
} else {
g1_policy()->note_alloc_region_limit_reached(ap);
}
}
return NULL;
@ -6553,11 +6558,11 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes,
GCAllocPurpose ap) {
InCSetState dest) {
bool during_im = g1_policy()->during_initial_mark_pause();
alloc_region->note_end_of_copying(during_im);
g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
if (ap == GCAllocForSurvived) {
if (dest.is_young()) {
young_list()->add_survivor_region(alloc_region);
} else {
_old_set.add(alloc_region);
View File
@ -32,6 +32,7 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
@ -213,6 +214,9 @@ class G1CollectedHeap : public SharedHeap {
friend class G1MarkSweep;
friend class HeapRegionClaimer;
// Testing classes.
friend class G1CheckCSetFastTableClosure;
private:
// The one and only G1CollectedHeap, so static functions can find it.
static G1CollectedHeap* _g1h;
@ -547,15 +551,9 @@ protected:
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
size_t word_size,
AllocationContext_t context);
HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
HeapRegion* alloc_region,
bool par,
size_t word_size);
inline HeapWord* par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context);
// Ensure that no further allocations can happen in "r", bearing in mind
// that parallel threads might be attempting allocations.
void par_allocate_remaining_space(HeapRegion* r);
@ -577,9 +575,9 @@ protected:
// For GC alloc regions.
HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
GCAllocPurpose ap);
InCSetState dest);
void retire_gc_alloc_region(HeapRegion* alloc_region,
size_t allocated_bytes, GCAllocPurpose ap);
size_t allocated_bytes, InCSetState dest);
// - if explicit_gc is true, the GC is for a System.gc() or a heap
// inspection request and should collect the entire heap
@ -640,26 +638,11 @@ public:
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);
// Returns the PLAB statistics given a purpose.
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
PLABStats* stats = NULL;
// Returns the PLAB statistics for a given destination.
inline PLABStats* alloc_buffer_stats(InCSetState dest);
switch (purpose) {
case GCAllocForSurvived:
stats = &_survivor_plab_stats;
break;
case GCAllocForTenured:
stats = &_old_plab_stats;
break;
default:
assert(false, "unrecognized GCAllocPurpose");
}
return stats;
}
// Determines PLAB size for a particular allocation purpose.
size_t desired_plab_sz(GCAllocPurpose purpose);
// Determines PLAB size for a given destination.
inline size_t desired_plab_sz(InCSetState dest);
inline AllocationContextStats& allocation_context_stats();
@ -683,8 +666,11 @@ public:
void register_humongous_regions_with_in_cset_fast_test();
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
void register_region_with_in_cset_fast_test(HeapRegion* r) {
_in_cset_fast_test.set_in_cset(r->hrm_index());
void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
_in_cset_fast_test.set_in_young(r->hrm_index());
}
void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
_in_cset_fast_test.set_in_old(r->hrm_index());
}
// This is a fast test on whether a reference points into the
@ -821,7 +807,7 @@ protected:
// In the sequential case this param will be ignored.
void g1_process_roots(OopClosure* scan_non_heap_roots,
OopClosure* scan_non_heap_weak_roots,
OopsInHeapRegionClosure* scan_rs,
G1ParPushHeapRSClosure* scan_rs,
CLDClosure* scan_strong_clds,
CLDClosure* scan_weak_clds,
CodeBlobClosure* scan_strong_code,
@ -1181,6 +1167,9 @@ public:
// appropriate error messages and crash.
void check_bitmaps(const char* caller) PRODUCT_RETURN;
// Do sanity check on the contents of the in-cset fast test table.
bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
// verify_region_sets() performs verification over the region
// lists. It will be compiled in the product code to be used when
// necessary (i.e., during heap verification).
@ -1276,53 +1265,15 @@ public:
inline bool is_in_cset_or_humongous(const oop obj);
enum in_cset_state_t {
InNeither, // neither in collection set nor humongous
InCSet, // region is in collection set only
IsHumongous // region is a humongous start region
};
private:
// Instances of this class are used for quick tests on whether a reference points
// into the collection set or is a humongous object (points into a humongous
// object).
// Each of the array's elements denotes whether the corresponding region is in
// the collection set or a humongous region.
// We use this to quickly reclaim humongous objects: by making a humongous region
// succeed this test, we sort-of add it to the collection set. During the reference
// iteration closures, when we see a humongous region, we simply mark it as
// referenced, i.e. live.
class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
protected:
char default_value() const { return G1CollectedHeap::InNeither; }
public:
void set_humongous(uintptr_t index) {
assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
set_by_index(index, G1CollectedHeap::IsHumongous);
}
void clear_humongous(uintptr_t index) {
set_by_index(index, G1CollectedHeap::InNeither);
}
void set_in_cset(uintptr_t index) {
assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
set_by_index(index, G1CollectedHeap::InCSet);
}
bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
void clear() { G1BiasedMappedArray<char>::clear(); }
};
// This array is used for a quick test on whether a reference points into
// the collection set or not. Each of the array's elements denotes whether the
// corresponding region is in the collection set or not.
G1FastCSetBiasedMappedArray _in_cset_fast_test;
G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
public:
inline in_cset_state_t in_cset_state(const oop obj);
inline InCSetState in_cset_state(const oop obj);
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
View File
@ -35,6 +35,41 @@
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"
PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
switch (dest.value()) {
case InCSetState::Young:
return &_survivor_plab_stats;
case InCSetState::Old:
return &_old_plab_stats;
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
// Prevent humongous PLAB sizes for two reasons:
// * PLABs are allocated using a similar path as oops, but should
// never be in a humongous region
// * Allowing humongous PLABs needlessly churns the region free lists
return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
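To make the clamp concrete, here is a minimal standalone sketch (the 1 MB region size, the half-region humongous threshold, and the desired size below are illustrative assumptions, not values taken from this patch):
#include <algorithm>
#include <cstddef>
#include <cstdio>
int main() {
  const size_t region_words  = (1u << 20) / sizeof(void*); // assume 1 MB regions, 8-byte words
  const size_t humongous_thr = region_words / 2;           // objects above half a region are humongous
  const size_t desired_plab  = 200000;                     // hypothetical stats-driven desired size
  const size_t plab_words    = std::min(humongous_thr, desired_plab); // the MIN2 above
  printf("PLAB clamped to %zu words\n", plab_words);       // prints 65536 under these assumptions
  return 0;
}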
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
size_t word_size,
AllocationContext_t context) {
switch (dest.value()) {
case InCSetState::Young:
return survivor_attempt_allocation(word_size, context);
case InCSetState::Old:
return old_attempt_allocation(word_size, context);
default:
ShouldNotReachHere();
return NULL; // Keep some compilers happy
}
}
// Inline functions for G1CollectedHeap
inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@ -203,7 +238,7 @@ bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}
G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
return _in_cset_fast_test.at((HeapWord*)obj);
}
View File
@ -1437,18 +1437,6 @@ bool G1CollectorPolicy::can_expand_young_list() {
return young_list_length < young_list_max_length;
}
uint G1CollectorPolicy::max_regions(int purpose) {
switch (purpose) {
case GCAllocForSurvived:
return _max_survivor_regions;
case GCAllocForTenured:
return REGIONS_UNLIMITED;
default:
ShouldNotReachHere();
return REGIONS_UNLIMITED;
};
}
void G1CollectorPolicy::update_max_gc_locker_expansion() {
uint expansion_region_num = 0;
if (GCLockerEdenExpansionPercent > 0) {
@ -1634,7 +1622,7 @@ void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
hr->set_next_in_collection_set(_collection_set);
_collection_set = hr;
_collection_set_bytes_used_before += hr->used();
_g1->register_region_with_in_cset_fast_test(hr);
_g1->register_old_region_with_in_cset_fast_test(hr);
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
_old_cset_region_length += 1;
@ -1767,7 +1755,7 @@ void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
hr->set_in_collection_set(true);
assert( hr->next_in_collection_set() == NULL, "invariant");
_g1->register_region_with_in_cset_fast_test(hr);
_g1->register_young_region_with_in_cset_fast_test(hr);
}
// Add the region at the RHS of the incremental cset
View File
@ -881,28 +881,20 @@ private:
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
inline GCAllocPurpose
evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
if (age < _tenuring_threshold && src_region->is_young()) {
return GCAllocForSurvived;
} else {
return GCAllocForTenured;
}
}
inline bool track_object_age(GCAllocPurpose purpose) {
return purpose == GCAllocForSurvived;
}
static const uint REGIONS_UNLIMITED = (uint) -1;
uint max_regions(int purpose);
// The limit on regions for a particular purpose is reached.
void note_alloc_region_limit_reached(int purpose) {
if (purpose == GCAllocForSurvived) {
_tenuring_threshold = 0;
uint max_regions(InCSetState dest) {
switch (dest.value()) {
case InCSetState::Young:
return _max_survivor_regions;
case InCSetState::Old:
return REGIONS_UNLIMITED;
default:
assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
break;
}
// keep some compilers happy
return 0;
}
void note_start_adding_survivor_regions() {
View File
@ -0,0 +1,132 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "memory/allocation.hpp"
// Per-region state during garbage collection.
struct InCSetState {
public:
// We use different types to represent the state value depending on the platform.
// SPARC in particular lays out struct values from "left to right", i.e. MSB to LSB,
// which results in many unnecessary shift operations when loading and storing
// values of this type and degrades performance significantly (>10%) on that platform.
// Other tested ABIs do not seem to have this problem, and actually tend to
// favor smaller types, so we use the smallest usable type there.
#ifdef SPARC
#define CSETSTATE_FORMAT INTPTR_FORMAT
typedef intptr_t in_cset_state_t;
#else
#define CSETSTATE_FORMAT "%d"
typedef int8_t in_cset_state_t;
#endif
private:
in_cset_state_t _value;
public:
enum {
// The values were chosen to micro-optimize the encoding and
// the frequency of the checks.
// The most common check is whether the region is in the collection set or not.
// This encoding allows us to use a != 0 check, which on some architectures
// (x86*) can be encoded slightly more efficiently than a normal comparison
// against zero.
// The same situation occurs when checking whether the region is humongous
// or not, which is encoded by values < 0.
// The other values are simply encoded in increasing generation order, which
// makes getting the next generation fast by a simple increment.
Humongous = -1, // The region is humongous - note that actually any value < 0 would be possible here.
NotInCSet = 0, // The region is not in the collection set.
Young = 1, // The region is in the collection set and a young region.
Old = 2, // The region is in the collection set and an old region.
Num
};
InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
assert(is_valid(), err_msg("Invalid state %d", _value));
}
in_cset_state_t value() const { return _value; }
void set_old() { _value = Old; }
bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
bool is_in_cset() const { return _value > NotInCSet; }
bool is_humongous() const { return _value < NotInCSet; }
bool is_young() const { return _value == Young; }
bool is_old() const { return _value == Old; }
#ifdef ASSERT
bool is_default() const { return !is_in_cset_or_humongous(); }
bool is_valid() const { return (_value >= Humongous) && (_value < Num); }
bool is_valid_gen() const { return (_value >= Young && _value <= Old); }
#endif
};
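To illustrate the encoding, here is a minimal standalone sketch (hypothetical names, not HotSpot code) of why the hot "in cset" check is a single compare against zero, why the humongous check is a sign test, and why aging is a plain increment:
#include <cassert>
#include <cstdint>
enum State : int8_t { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };
static bool is_in_cset_or_humongous(int8_t v) { return v != 0; } // one test against zero
static bool is_in_cset(int8_t v)              { return v > 0; }
static bool is_humongous(int8_t v)            { return v < 0; }
static int8_t next_generation(int8_t v)       { return v + 1; } // Young -> Old
int main() {
  assert(is_in_cset_or_humongous(Humongous) && is_humongous(Humongous));
  assert(is_in_cset(Young) && !is_humongous(Young));
  assert(next_generation(Young) == Old);
  assert(!is_in_cset_or_humongous(NotInCSet));
  return 0;
}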
// Instances of this class are used for quick tests on whether a reference points
// into the collection set (and if so, into which generation) or into a humongous object.
//
// Each of the array's elements indicates whether the corresponding region is in
// the collection set (and if so, in which generation) or is a humongous region.
//
// We use this to speed up reference processing during young collection and to
// quickly reclaim humongous objects. For the latter, by making a humongous region
// succeed this test, we sort-of add it to the collection set. When the reference
// iteration closures see a humongous region, they simply mark it as
// referenced, i.e. live.
class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
protected:
InCSetState default_value() const { return InCSetState::NotInCSet; }
public:
void set_humongous(uintptr_t index) {
assert(get_by_index(index).is_default(),
err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
set_by_index(index, InCSetState::Humongous);
}
void clear_humongous(uintptr_t index) {
set_by_index(index, InCSetState::NotInCSet);
}
void set_in_young(uintptr_t index) {
assert(get_by_index(index).is_default(),
err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
set_by_index(index, InCSetState::Young);
}
void set_in_old(uintptr_t index) {
assert(get_by_index(index).is_default(),
err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
set_by_index(index, InCSetState::Old);
}
bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
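The fast-test array itself can be pictured with a simplified standalone model (a hypothetical FastCSetTable, not the real G1BiasedMappedArray, which additionally supports biased indices below the heap base):
#include <cassert>
#include <cstdint>
#include <vector>
class FastCSetTable {
  std::vector<int8_t> _states;  // one state entry per region, 0 = not in cset
  uintptr_t _heap_base;
  unsigned _region_shift;       // log2 of the region size
public:
  FastCSetTable(uintptr_t heap_base, unsigned region_shift, size_t num_regions)
    : _states(num_regions, 0), _heap_base(heap_base), _region_shift(region_shift) {}
  size_t index_for(uintptr_t addr) const { return (addr - _heap_base) >> _region_shift; }
  void set_in_young(size_t i)  { _states[i] = 1; }
  void set_in_old(size_t i)    { _states[i] = 2; }
  void set_humongous(size_t i) { _states[i] = -1; }
  // The hot-path check: one load plus one compare against zero.
  bool is_in_cset_or_humongous(uintptr_t addr) const { return _states[index_for(addr)] != 0; }
};
int main() {
  FastCSetTable t(0x100000, 20, 8); // eight 1 MB regions at a fake heap base
  t.set_in_young(3);
  assert(t.is_in_cset_or_humongous(0x100000 + 3 * (1u << 20)));
  assert(!t.is_in_cset_or_humongous(0x100000));
  return 0;
}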
View File
@ -26,6 +26,7 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
#include "memory/iterator.hpp"
#include "oops/markOop.hpp"
class HeapRegion;
class G1CollectedHeap;
@ -239,14 +240,14 @@ class G1UpdateRSOrPushRefOopClosure: public ExtendedOopClosure {
G1CollectedHeap* _g1;
G1RemSet* _g1_rem_set;
HeapRegion* _from;
OopsInHeapRegionClosure* _push_ref_cl;
G1ParPushHeapRSClosure* _push_ref_cl;
bool _record_refs_into_cset;
uint _worker_i;
public:
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i = 0);
@ -256,7 +257,8 @@ public:
}
bool self_forwarded(oop obj) {
bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
markOop m = obj->mark();
bool result = (m->is_marked() && ((oop)m->decode_pointer() == obj));
return result;
}
View File
@ -67,8 +67,8 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
if (state == G1CollectedHeap::InCSet) {
const InCSetState state = _g1->in_cset_state(obj);
if (state.is_in_cset()) {
// We're not going to even bother checking whether the object is
// already forwarded or not, as this usually causes an immediate
// stall. We'll try to prefetch the object (for write, given that
@ -87,7 +87,7 @@ inline void G1ParScanClosure::do_oop_nv(T* p) {
_par_scan_state->push_on_queue(p);
} else {
if (state == G1CollectedHeap::IsHumongous) {
if (state.is_humongous()) {
_g1->set_humongous_is_live(obj);
}
_par_scan_state->update_rs(_from, p, _worker_id);
View File
@ -131,6 +131,9 @@ MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages)
_committed.set_range(start, start + size_in_pages);
MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
if (AlwaysPreTouch) {
os::pretouch_memory((char*)result.start(), (char*)result.end());
}
return result;
}
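A sketch of the assumed pretouch semantics (illustrative; not the implementation of os::pretouch_memory): write to one location per page so the operating system backs the committed range with physical memory up front instead of on first use during a pause:
#include <cstdlib>
static void pretouch(char* start, char* end, size_t page_size) {
  for (volatile char* p = start; p < end; p += page_size) {
    *p = 0; // touch each page once to force it to be mapped
  }
}
int main() {
  const size_t page = 4096, bytes = 16 * page;
  char* mem = static_cast<char*>(std::malloc(bytes));
  if (mem != NULL) {
    pretouch(mem, mem + bytes, page);
    std::free(mem);
  }
  return 0;
}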
View File
@ -38,6 +38,7 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
_tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
@ -59,6 +60,12 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
_dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
// The dest for Young is used when objects have aged enough to
// be moved to the next space.
_dest[InCSetState::Young] = InCSetState::Old;
_dest[InCSetState::Old] = InCSetState::Old;
_start = os::elapsedTime();
}
@ -150,52 +157,94 @@ void G1ParScanThreadState::trim_queue() {
} while (!_refs->is_empty());
}
oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
InCSetState* dest,
size_t word_sz,
AllocationContext_t const context) {
assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
// Right now we only have two types of regions (young / old) so
// let's keep the logic here simple. We can generalize it when necessary.
if (dest->is_young()) {
HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
word_sz, context);
if (obj_ptr == NULL) {
return NULL;
}
// Make sure that we won't attempt to copy any other objects out
// of a survivor region (given that apparently we cannot allocate
// any new ones) to avoid coming into this slow path.
_tenuring_threshold = 0;
dest->set_old();
return obj_ptr;
} else {
assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
// no other space to try.
return NULL;
}
}
InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
if (state.is_young()) {
age = !m->has_displaced_mark_helper() ? m->age()
: m->displaced_mark_helper()->age();
if (age < _tenuring_threshold) {
return state;
}
}
return dest(state);
}
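A standalone sketch (hypothetical names) of this destination decision combined with the _dest table set up in the constructor above: young objects under the tenuring threshold keep their state, everything else moves to the next "generation":
#include <cassert>
#include <cstdint>
enum State : int8_t { NotInCSet = 0, Young = 1, Old = 2, Num = 3 };
static const int8_t dest_table[Num] = { NotInCSet, Old, Old }; // Young ages into Old, Old stays Old
static int8_t next_state(int8_t state, unsigned age, unsigned tenuring_threshold) {
  if (state == Young && age < tenuring_threshold) {
    return Young;            // keep copying into survivor space
  }
  return dest_table[state];  // otherwise use the configured destination
}
int main() {
  assert(next_state(Young, 2, 15) == Young);  // young and below threshold: stays young
  assert(next_state(Young, 15, 15) == Old);   // aged out: tenured
  assert(next_state(Old, 0, 15) == Old);      // old stays old
  return 0;
}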
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
oop const old,
markOop const old_mark) {
size_t word_sz = old->size();
HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
const size_t word_sz = old->size();
HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
// +1 to make the -1 indexes valid...
int young_index = from_region->young_index_in_cset()+1;
const int young_index = from_region->young_index_in_cset()+1;
assert( (from_region->is_young() && young_index > 0) ||
(!from_region->is_young() && young_index == 0), "invariant" );
G1CollectorPolicy* g1p = _g1h->g1_policy();
uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
: old_mark->age();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
AllocationContext_t context = from_region->allocation_context();
HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
const AllocationContext_t context = from_region->allocation_context();
uint age = 0;
InCSetState dest_state = next_state(state, old_mark, age);
HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
if (obj_ptr == NULL) {
obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
return _g1h->handle_evacuation_failure_par(this, old);
}
}
}
assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
if (obj_ptr != NULL) {
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj_ptr = NULL;
}
}
#endif // !PRODUCT
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
// Doing this after all the allocation attempts also exercises the
// undo_allocation() method.
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return _g1h->handle_evacuation_failure_par(this, old);
}
oop obj = oop(obj_ptr);
#endif // !PRODUCT
// We're going to allocate linearly, so might as well prefetch ahead.
Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
oop forward_ptr = old->forward_to_atomic(obj);
const oop obj = oop(obj_ptr);
const oop forward_ptr = old->forward_to_atomic(obj);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
// alloc_purpose is just a hint to allocate() above, recheck the type of region
// we actually allocated from and update alloc_purpose accordingly
HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
if (g1p->track_object_age(alloc_purpose)) {
if (dest_state.is_young()) {
if (age < markOopDesc::max_age) {
age++;
}
@ -215,13 +264,19 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
}
if (G1StringDedup::is_enabled()) {
G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
to_region->is_young(),
const bool is_from_young = state.is_young();
const bool is_to_young = dest_state.is_young();
assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
"sanity");
assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
"sanity");
G1StringDedup::enqueue_from_evacuation(is_from_young,
is_to_young,
queue_num(),
obj);
}
size_t* surv_young_words = surviving_young_words();
size_t* const surv_young_words = surviving_young_words();
surv_young_words[young_index] += word_sz;
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@ -232,14 +287,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
oop* old_p = set_partial_array_mask(old);
push_on_queue(old_p);
} else {
// No point in using the slower heap_region_containing() method,
// given that we know obj is in the heap.
_scanner.set_region(_g1h->heap_region_containing_raw(obj));
HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
_scanner.set_region(to_region);
obj->oop_iterate_backwards(&_scanner);
}
return obj;
} else {
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj = forward_ptr;
_g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
return forward_ptr;
}
return obj;
}
View File
@ -46,14 +46,16 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocator* _g1_par_allocator;
G1ParGCAllocator* _g1_par_allocator;
ageTable _age_table;
ageTable _age_table;
InCSetState _dest[InCSetState::Num];
// Local tenuring threshold.
uint _tenuring_threshold;
G1ParScanClosure _scanner;
G1ParScanClosure _scanner;
size_t _alloc_buffer_waste;
size_t _undo_waste;
size_t _alloc_buffer_waste;
size_t _undo_waste;
OopsInHeapRegionClosure* _evac_failure_cl;
@ -82,6 +84,14 @@ class G1ParScanThreadState : public StackObj {
DirtyCardQueue& dirty_card_queue() { return _dcq; }
G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
InCSetState dest(InCSetState original) const {
assert(original.is_valid(),
err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
assert(_dest[original.value()].is_valid_gen(),
err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
return _dest[original.value()];
}
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
~G1ParScanThreadState();
@ -112,7 +122,6 @@ class G1ParScanThreadState : public StackObj {
}
}
}
public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
@ -193,9 +202,20 @@ class G1ParScanThreadState : public StackObj {
template <class T> inline void deal_with_reference(T* ref_to_scan);
inline void dispatch_reference(StarTask ref);
// Tries to allocate word_sz in the PLAB of the next "generation" after trying to
// allocate into dest. State is the original (source) cset state of the object
// being allocated.
// Returns a non-NULL pointer if successful, and updates dest if required.
HeapWord* allocate_in_next_plab(InCSetState const state,
InCSetState* dest,
size_t word_sz,
AllocationContext_t const context);
inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
public:
oop copy_to_survivor_space(oop const obj, markOop const old_mark);
oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
void trim_queue();
View File
@ -38,21 +38,21 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
// set; due to (benign) races in the claim mechanism during RSet scanning, more
// than one thread might claim the same card, so the same card may be
// processed multiple times. Hence, redo this check.
G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
if (in_cset_state == G1CollectedHeap::InCSet) {
const InCSetState in_cset_state = _g1h->in_cset_state(obj);
if (in_cset_state.is_in_cset()) {
oop forwardee;
markOop m = obj->mark();
if (m->is_marked()) {
forwardee = (oop) m->decode_pointer();
} else {
forwardee = copy_to_survivor_space(obj, m);
forwardee = copy_to_survivor_space(in_cset_state, obj, m);
}
oopDesc::encode_store_heap_oop(p, forwardee);
} else if (in_cset_state == G1CollectedHeap::IsHumongous) {
} else if (in_cset_state.is_humongous()) {
_g1h->set_humongous_is_live(obj);
} else {
assert(in_cset_state == G1CollectedHeap::InNeither,
err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
assert(!in_cset_state.is_in_cset_or_humongous(),
err_msg("In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value()));
}
assert(obj != NULL, "Must be");
View File
@ -80,7 +80,7 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
_prev_period_summary()
{
_seq_task = new SubTasksDone(NumSeqTasks);
_cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
_cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
for (uint i = 0; i < n_workers(); i++) {
_cset_rs_update_cl[i] = NULL;
}
@ -94,14 +94,14 @@ G1RemSet::~G1RemSet() {
for (uint i = 0; i < n_workers(); i++) {
assert(_cset_rs_update_cl[i] == NULL, "it should be");
}
FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
}
class ScanRSClosure : public HeapRegionClosure {
size_t _cards_done, _cards;
G1CollectedHeap* _g1h;
OopsInHeapRegionClosure* _oc;
G1ParPushHeapRSClosure* _oc;
CodeBlobClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
@ -113,7 +113,7 @@ class ScanRSClosure : public HeapRegionClosure {
bool _try_claimed;
public:
ScanRSClosure(OopsInHeapRegionClosure* oc,
ScanRSClosure(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
@ -135,8 +135,7 @@ public:
void scanCard(size_t index, HeapRegion *r) {
// Stack allocate the DirtyCardToOopClosure instance
HeapRegionDCTOC cl(_g1h, r, _oc,
CardTableModRefBS::Precise,
HeapRegionDCTOC::IntoCSFilterKind);
CardTableModRefBS::Precise);
// Set the "from" region in the closure.
_oc->set_region(r);
@ -231,7 +230,7 @@ public:
size_t cards_looked_up() { return _cards;}
};
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
@ -301,7 +300,7 @@ void G1RemSet::cleanupHRRS() {
HeapRegionRemSet::cleanup();
}
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
@ -417,7 +416,7 @@ G1Mux2Closure::G1Mux2Closure(OopClosure *c1, OopClosure *c2) :
G1UpdateRSOrPushRefOopClosure::
G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
G1RemSet* rs,
OopsInHeapRegionClosure* push_ref_cl,
G1ParPushHeapRSClosure* push_ref_cl,
bool record_refs_into_cset,
uint worker_i) :
_g1(g1h), _g1_rem_set(rs), _from(NULL),
@ -518,7 +517,7 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
ct_freq_note_card(_ct_bs->index_for(start));
#endif
OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
if (check_for_refs_into_cset) {
// ConcurrentG1RefineThreads have worker numbers larger than what
// _cset_rs_update_cl[] is set up to handle. But those threads should
View File
@ -33,6 +33,7 @@
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class ConcurrentG1Refine;
class G1ParPushHeapRSClosure;
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
@ -68,7 +69,7 @@ protected:
// Used for caching the closure that is responsible for scanning
// references into the collection set.
OopsInHeapRegionClosure** _cset_rs_update_cl;
G1ParPushHeapRSClosure** _cset_rs_update_cl;
// Print the given summary info
virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
@ -95,7 +96,7 @@ public:
// partitioning the work to be done. It should be the same as
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
CodeBlobClosure* code_root_cl,
uint worker_i);
@ -107,7 +108,7 @@ public:
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();
void scanRS(OopsInHeapRegionClosure* oc,
void scanRS(G1ParPushHeapRSClosure* oc,
CodeBlobClosure* code_root_cl,
uint worker_i);
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,7 @@
#include "runtime/thread.inline.hpp"
G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
CardTableModRefBSForCTRS(whole_heap)
CardTableModRefBS(whole_heap)
{
_kind = G1SATBCT;
}
View File
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ class G1SATBCardTableLoggingModRefBS;
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
class G1SATBCardTableModRefBS: public CardTableModRefBS {
protected:
enum G1CardValues {
g1_young_gen = CT_MR_BS_last_reserved << 1
View File
@ -48,93 +48,55 @@ size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
HeapRegion* hr, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
FilterKind fk) :
HeapRegion* hr,
G1ParPushHeapRSClosure* cl,
CardTableModRefBS::PrecisionStyle precision) :
DirtyCardToOopClosure(hr, cl, precision, NULL),
_hr(hr), _fk(fk), _g1(g1) { }
_hr(hr), _rs_scan(cl), _g1(g1) { }
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure* oc) :
_r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
template<class ClosureType>
HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
HeapRegion* hr,
HeapWord* cur, HeapWord* top) {
oop cur_oop = oop(cur);
size_t oop_size = hr->block_size(cur);
HeapWord* next_obj = cur + oop_size;
while (next_obj < top) {
// Keep filtering the remembered set.
if (!g1h->is_obj_dead(cur_oop, hr)) {
// Bottom lies entirely below top, so we can call the
// non-memRegion version of oop_iterate below.
cur_oop->oop_iterate(cl);
}
cur = next_obj;
cur_oop = oop(cur);
oop_size = hr->block_size(cur);
next_obj = cur + oop_size;
}
return cur;
}
void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
HeapWord* bottom,
HeapWord* top) {
G1CollectedHeap* g1h = _g1;
size_t oop_size;
ExtendedOopClosure* cl2 = NULL;
FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
switch (_fk) {
case NoFilterKind: cl2 = _cl; break;
case IntoCSFilterKind: cl2 = &intoCSFilt; break;
case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
default: ShouldNotReachHere();
}
HeapWord* cur = bottom;
// Start filtering what we add to the remembered set. If the object is
// not considered dead, either because it is marked (in the mark bitmap)
// or it was allocated after marking finished, then we add it. Otherwise
// we can safely ignore the object.
if (!g1h->is_obj_dead(oop(bottom), _hr)) {
oop_size = oop(bottom)->oop_iterate(cl2, mr);
if (!g1h->is_obj_dead(oop(cur), _hr)) {
oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
} else {
oop_size = _hr->block_size(bottom);
oop_size = _hr->block_size(cur);
}
bottom += oop_size;
cur += oop_size;
if (bottom < top) {
// We replicate the loop below for several kinds of possible filters.
switch (_fk) {
case NoFilterKind:
bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
break;
case IntoCSFilterKind: {
FilterIntoCSClosure filt(this, g1h, _cl);
bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
break;
}
case OutOfRegionFilterKind: {
FilterOutOfRegionClosure filt(_hr, _cl);
bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
break;
}
default:
ShouldNotReachHere();
if (cur < top) {
oop cur_oop = oop(cur);
oop_size = _hr->block_size(cur);
HeapWord* next_obj = cur + oop_size;
while (next_obj < top) {
// Keep filtering the remembered set.
if (!g1h->is_obj_dead(cur_oop, _hr)) {
// Bottom lies entirely below top, so we can call the
// non-memRegion version of oop_iterate below.
cur_oop->oop_iterate(_rs_scan);
}
cur = next_obj;
cur_oop = oop(cur);
oop_size = _hr->block_size(cur);
next_obj = cur + oop_size;
}
// Last object. Need to do dead-obj filtering here too.
if (!g1h->is_obj_dead(oop(bottom), _hr)) {
oop(bottom)->oop_iterate(cl2, mr);
if (!g1h->is_obj_dead(oop(cur), _hr)) {
oop(cur)->oop_iterate(_rs_scan, mr);
}
}
}
View File
@ -67,17 +67,9 @@ class nmethod;
// sets.
class HeapRegionDCTOC : public DirtyCardToOopClosure {
public:
// Specification of possible DirtyCardToOopClosure filtering.
enum FilterKind {
NoFilterKind,
IntoCSFilterKind,
OutOfRegionFilterKind
};
protected:
private:
HeapRegion* _hr;
FilterKind _fk;
G1ParPushHeapRSClosure* _rs_scan;
G1CollectedHeap* _g1;
// Walk the given memory region from bottom to (actual) top
@ -90,9 +82,9 @@ protected:
public:
HeapRegionDCTOC(G1CollectedHeap* g1,
HeapRegion* hr, ExtendedOopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
FilterKind fk);
HeapRegion* hr,
G1ParPushHeapRSClosure* cl,
CardTableModRefBS::PrecisionStyle precision);
};
// The complicating factor is that BlockOffsetTable diverged
View File
@ -407,20 +407,8 @@ void FromCardCache::clear(uint region_idx) {
}
}
void OtherRegionsTable::initialize(uint max_regions) {
FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
}
void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
FromCardCache::invalidate(start_idx, num_regions);
}
void OtherRegionsTable::print_from_card_cache() {
FromCardCache::print();
}
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
uint cur_hrm_ind = hr()->hrm_index();
uint cur_hrm_ind = _hr->hrm_index();
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@ -434,7 +422,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
hr()->bottom(), from_card,
_hr->bottom(), from_card,
FromCardCache::at(tid, cur_hrm_ind));
}
@ -477,13 +465,13 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
if (G1HRRSUseSparseTable &&
_sparse_table.add_card(from_hrm_ind, card_index)) {
if (G1RecordHRRSOops) {
HeapRegionRemSet::record(hr(), from);
HeapRegionRemSet::record(_hr, from);
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print(" Added card " PTR_FORMAT " to region "
"[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
align_size_down(uintptr_t(from),
CardTableModRefBS::card_size),
hr()->bottom(), from);
_hr->bottom(), from);
}
}
if (G1TraceHeapRegionRememberedSet) {
@ -539,13 +527,13 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
prt->add_reference(from);
if (G1RecordHRRSOops) {
HeapRegionRemSet::record(hr(), from);
HeapRegionRemSet::record(_hr, from);
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print("Added card " PTR_FORMAT " to region "
"[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
align_size_down(uintptr_t(from),
CardTableModRefBS::card_size),
hr()->bottom(), from);
_hr->bottom(), from);
}
}
assert(contains_reference(from), "We just added it!");
@ -614,7 +602,7 @@ PerRegionTable* OtherRegionsTable::delete_region_table() {
if (G1TraceHeapRegionRememberedSet) {
gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
"for region [" PTR_FORMAT "...] (" SIZE_FORMAT " coarse entries).\n",
hr()->bottom(),
_hr->bottom(),
max->hr()->bottom(),
_n_coarse_entries);
}
@ -627,13 +615,11 @@ PerRegionTable* OtherRegionsTable::delete_region_table() {
return max;
}
// At present, this must be called stop-world single-threaded.
void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
BitMap* region_bm, BitMap* card_bm) {
// First eliminate garbage regions from the coarse map.
if (G1RSScrubVerbose) {
gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index());
}
assert(_coarse_map.size() == region_bm->size(), "Precondition");
@ -752,7 +738,7 @@ size_t OtherRegionsTable::fl_mem_size() {
}
void OtherRegionsTable::clear_fcc() {
FromCardCache::clear(hr()->hrm_index());
FromCardCache::clear(_hr->hrm_index());
}
void OtherRegionsTable::clear() {
@ -774,27 +760,6 @@ void OtherRegionsTable::clear() {
clear_fcc();
}
bool OtherRegionsTable::del_single_region_table(size_t ind,
HeapRegion* hr) {
assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
PerRegionTable** prev_addr = &_fine_grain_regions[ind];
PerRegionTable* prt = *prev_addr;
while (prt != NULL && prt->hr() != hr) {
prev_addr = prt->collision_list_next_addr();
prt = prt->collision_list_next();
}
if (prt != NULL) {
assert(prt->hr() == hr, "Loop postcondition.");
*prev_addr = prt->collision_list_next();
unlink_from_all(prt);
PerRegionTable::free(prt);
_n_fine_entries--;
return true;
} else {
return false;
}
}
bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
// Cast away const in this case.
MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
@ -840,7 +805,7 @@ uint HeapRegionRemSet::num_par_rem_sets() {
HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
HeapRegion* hr)
: _bosa(bosa),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
_m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
_code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
reset_for_par_iteration();
}
@ -975,7 +940,7 @@ HeapRegionRemSetIterator:: HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
_hrrs(hrrs),
_g1h(G1CollectedHeap::heap()),
_coarse_map(&hrrs->_other_regions._coarse_map),
_bosa(hrrs->bosa()),
_bosa(hrrs->_bosa),
_is(Sparse),
// Set these values so that we increment to the first region.
_coarse_cur_region_index(-1),
View File
@ -162,32 +162,36 @@ class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
// to hold _m, and the fine-grain table to be full.
PerRegionTable* delete_region_table();
// If a PRT for "hr" is in the bucket list indicated by "ind" (which must
// be the correct index for "hr"), delete it and return true; else return
// false.
bool del_single_region_table(size_t ind, HeapRegion* hr);
// link/add the given fine grain remembered set into the "all" list
void link_to_all(PerRegionTable * prt);
// unlink/remove the given fine grain remembered set from the "all" list
void unlink_from_all(PerRegionTable * prt);
public:
OtherRegionsTable(HeapRegion* hr, Mutex* m);
bool contains_reference_locked(OopOrNarrowOopStar from) const;
HeapRegion* hr() const { return _hr; }
// Clear the from_card_cache entries for this region.
void clear_fcc();
public:
// Create a new remembered set for the given heap region. The given mutex should
// be used to ensure consistency.
OtherRegionsTable(HeapRegion* hr, Mutex* m);
// For now. Could "expand" some tables in the future, so that this made
// sense.
void add_reference(OopOrNarrowOopStar from, uint tid);
// Returns whether the remembered set contains the given reference.
bool contains_reference(OopOrNarrowOopStar from) const;
// Removes any entries shown by the given bitmaps to contain only dead
// objects.
// objects. Not thread safe.
// Set bits in the bitmaps indicate that the given region or card is live.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
// Returns whether this remembered set (and all sub-sets) contain no entries.
// Returns whether this remembered set (and all sub-sets) does not contain any entry.
bool is_empty() const;
// Returns the number of cards contained in this remembered set.
size_t occupied() const;
size_t occ_fine() const;
size_t occ_coarse() const;
@ -195,31 +199,17 @@ public:
static jint n_coarsenings() { return _n_coarsenings; }
// Returns size in bytes.
// Not const because it takes a lock.
// Returns size of the actual remembered set containers in bytes.
size_t mem_size() const;
// Returns the size of static data in bytes.
static size_t static_mem_size();
// Returns the size of the free list content in bytes.
static size_t fl_mem_size();
bool contains_reference(OopOrNarrowOopStar from) const;
bool contains_reference_locked(OopOrNarrowOopStar from) const;
// Clear the entire contents of this remembered set.
void clear();
// Specifically clear the from_card_cache.
void clear_fcc();
void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
// Declare the heap size (in # of regions) to the OtherRegionsTable.
// (Uses it to initialize from_card_cache).
static void initialize(uint max_regions);
// Declares that regions between start_idx <= i < start_idx + num_regions are
// not in use. Make sure that any entries for these regions are invalid.
static void invalidate(uint start_idx, size_t num_regions);
static void print_from_card_cache();
};
class HeapRegionRemSet : public CHeapObj<mtGC> {
@ -233,7 +223,6 @@ public:
private:
G1BlockOffsetSharedArray* _bosa;
G1BlockOffsetSharedArray* bosa() const { return _bosa; }
// A set of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet.
@ -268,10 +257,6 @@ public:
static uint num_par_rem_sets();
static void setup_remset_size();
HeapRegion* hr() const {
return _other_regions.hr();
}
bool is_empty() const {
return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
}
@ -305,8 +290,9 @@ public:
_other_regions.add_reference(from, tid);
}
// Removes any entries shown by the given bitmaps to contain only dead
// objects.
// Removes any entries in the remembered set shown by the given bitmaps to
// contain only dead objects. Not thread safe.
// Set bits in the bitmaps indicate that the given region or card is live.
void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
// The region is being reclaimed; clear its remset, and any mention of
@ -397,16 +383,16 @@ public:
// Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
// (Uses it to initialize from_card_cache).
static void init_heap(uint max_regions) {
OtherRegionsTable::initialize(max_regions);
FromCardCache::initialize(num_par_rem_sets(), max_regions);
}
static void invalidate(uint start_idx, uint num_regions) {
OtherRegionsTable::invalidate(start_idx, num_regions);
static void invalidate_from_card_cache(uint start_idx, size_t num_regions) {
FromCardCache::invalidate(start_idx, num_regions);
}
#ifndef PRODUCT
static void print_from_card_cache() {
OtherRegionsTable::print_from_card_cache();
FromCardCache::print();
}
#endif
View File
@ -400,7 +400,8 @@ void GCTaskManager::initialize() {
assert(workers() != 0, "no workers");
_monitor = new Monitor(Mutex::barrier, // rank
"GCTaskManager monitor", // name
Mutex::_allow_vm_block_flag); // allow_vm_block
Mutex::_allow_vm_block_flag, // allow_vm_block
Monitor::_safepoint_check_never);
// The queue for the GCTaskManager must be a CHeapObj.
GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
_queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
@ -1125,7 +1126,8 @@ Monitor* MonitorSupply::reserve() {
} else {
result = new Monitor(Mutex::barrier, // rank
"MonitorSupply monitor", // name
Mutex::_allow_vm_block_flag); // allow_vm_block
Mutex::_allow_vm_block_flag, // allow_vm_block
Monitor::_safepoint_check_never);
}
guarantee(result != NULL, "shouldn't return NULL");
assert(!result->is_locked(), "shouldn't be locked");
View File
@ -195,7 +195,7 @@ bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor()->enable_discovery();
ref_processor()->setup_policy(clear_all_softrefs);
mark_sweep_phase1(clear_all_softrefs);
View File
@ -41,31 +41,24 @@ class PSMarkSweep : public MarkSweep {
// Closure accessors
static OopClosure* mark_and_push_closure() { return &MarkSweep::mark_and_push_closure; }
static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
static VoidClosure* follow_stack_closure() { return &MarkSweep::follow_stack_closure; }
static CLDClosure* follow_cld_closure() { return &MarkSweep::follow_cld_closure; }
static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
static OopClosure* adjust_pointer_closure() { return &MarkSweep::adjust_pointer_closure; }
static CLDClosure* adjust_cld_closure() { return &MarkSweep::adjust_cld_closure; }
static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
static BoolObjectClosure* is_alive_closure() { return &MarkSweep::is_alive; }
debug_only(public:) // Used for PSParallelCompact debugging
// Mark live objects
static void mark_sweep_phase1(bool clear_all_softrefs);
// Calculate new addresses
static void mark_sweep_phase2();
debug_only(private:) // End used for PSParallelCompact debugging
// Update pointers
static void mark_sweep_phase3();
// Move objects to new positions
static void mark_sweep_phase4();
debug_only(public:) // Used for PSParallelCompact debugging
// Temporary data structures for traversal and storing/restoring marks
static void allocate_stacks();
static void deallocate_stacks();
static void set_ref_processor(ReferenceProcessor* rp) { // delete this method
_ref_processor = rp;
}
debug_only(private:) // End used for PSParallelCompact debugging
// If objects are left in eden after a collection, try to move the boundary
// and absorb them into the old gen. Returns true if eden was emptied.
View File
@ -2069,7 +2069,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor()->enable_discovery();
ref_processor()->setup_policy(maximum_heap_compaction);
bool marked_for_unloading = false;
View File
@ -147,6 +147,10 @@ class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
claimed_stack_depth()->push(p);
}
inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
uint age, bool tenured,
const PSPromotionLAB* lab);
protected:
static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
public:
View File
@ -64,6 +64,33 @@ inline void PSPromotionManager::claim_or_forward_depth(T* p) {
claim_or_forward_internal_depth(p);
}
inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
size_t obj_size,
uint age, bool tenured,
const PSPromotionLAB* lab) {
// Skip if memory allocation failed
if (new_obj != NULL) {
const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
if (lab != NULL) {
// Promotion of object through newly allocated PLAB
if (gc_tracer->should_report_promotion_in_new_plab_event()) {
size_t obj_bytes = obj_size * HeapWordSize;
size_t lab_size = lab->capacity();
gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
age, tenured, lab_size);
}
} else {
// Promotion of object directly to heap
if (gc_tracer->should_report_promotion_outside_plab_event()) {
size_t obj_bytes = obj_size * HeapWordSize;
gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
age, tenured);
}
}
}
}
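The shape of this helper follows a common check-then-send tracing pattern, sketched below with a hypothetical tracer (not the real ParallelScavengeTracer API): ask whether the event is enabled before assembling and sending its payload, so the disabled case stays cheap:
#include <cstddef>
#include <cstdio>
struct Tracer {
  bool enabled;
  bool should_report_promotion() const { return enabled; }
  void report_promotion(size_t bytes, unsigned age, bool tenured) const {
    printf("promotion: %zu bytes, age %u, tenured %d\n", bytes, age, (int)tenured);
  }
};
int main() {
  Tracer t = { true };
  if (t.should_report_promotion()) {      // cheap check first
    t.report_promotion(64 * 8, 3, false); // payload computed only when needed
  }
  return 0;
}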
//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
@ -85,11 +112,11 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
bool new_obj_is_tenured = false;
size_t new_obj_size = o->size();
if (!promote_immediately) {
// Find the object's age, MT safe.
uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
test_mark->displaced_mark_helper()->age() : test_mark->age();
// Find the object's age, MT safe.
uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
test_mark->displaced_mark_helper()->age() : test_mark->age();
if (!promote_immediately) {
// Try allocating obj in to-space (unless too old)
if (age < PSScavenge::tenuring_threshold()) {
new_obj = (oop) _young_lab.allocate(new_obj_size);
@ -98,6 +125,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
if (new_obj_size > (YoungPLABSize / 2)) {
// Allocate this object directly
new_obj = (oop)young_space()->cas_allocate(new_obj_size);
promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
} else {
// Flush and fill
_young_lab.flush();
@ -107,6 +135,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
_young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
// Try the young lab allocation again.
new_obj = (oop) _young_lab.allocate(new_obj_size);
promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
} else {
_young_gen_is_full = true;
}
@ -132,6 +161,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
if (new_obj_size > (OldPLABSize / 2)) {
// Allocate this object directly
new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
} else {
// Flush and fill
_old_lab.flush();
@ -148,6 +178,7 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
_old_lab.initialize(MemRegion(lab_base, OldPLABSize));
// Try the old lab allocation again.
new_obj = (oop) _old_lab.allocate(new_obj_size);
promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
}
}
}

View File

@ -362,7 +362,7 @@ bool PSScavenge::invoke_no_policy() {
COMPILER2_PRESENT(DerivedPointerTable::clear());
reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
reference_processor()->enable_discovery();
reference_processor()->setup_policy(false);
// We track how much was promoted to the next generation for

View File

@ -92,6 +92,7 @@ class PSScavenge: AllStatic {
// Private accessors
static CardTableExtension* const card_table() { assert(_card_table != NULL, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
public:
// Accessors

View File

@ -88,7 +88,8 @@ static void _sltLoop(JavaThread* thread, TRAPS) {
SurrogateLockerThread::SurrogateLockerThread() :
JavaThread(&_sltLoop),
_monitor(Mutex::nonleaf, "SLTMonitor"),
_monitor(Mutex::nonleaf, "SLTMonitor", false,
Monitor::_safepoint_check_sometimes),
_buffer(empty)
{}

View File

@ -172,6 +172,27 @@ void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
_tenuring_threshold = tenuring_threshold;
}
bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
return should_send_promotion_in_new_plab_event();
}
bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
return should_send_promotion_outside_plab_event();
}
void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured,
size_t plab_size) const {
assert_set_gc_id();
send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
}
void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured) const {
assert_set_gc_id();
send_promotion_outside_plab_event(klass, obj_size, age, tenured);
}
void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
assert_set_gc_id();

View File

@ -156,9 +156,38 @@ class YoungGCTracer : public GCTracer {
void report_promotion_failed(const PromotionFailedInfo& pf_info);
void report_tenuring_threshold(const uint tenuring_threshold);
/*
* Methods for reporting promotion-in-new-PLAB and promotion-outside-PLAB events.
*
* The object age is always required as it is not certain that the mark word
* of the oop can be trusted at this stage.
*
* obj_size is the size of the promoted object in bytes.
*
* tenured should be true if the object has been promoted to the old
* space during this GC; if the object is copied to survivor space
* from young space or survivor space (aging), tenured should be false.
*
* plab_size is the size of the newly allocated PLAB in bytes.
*/
bool should_report_promotion_in_new_plab_event() const;
bool should_report_promotion_outside_plab_event() const;
void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured,
size_t plab_size) const;
void report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured) const;
private:
void send_young_gc_event() const;
void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const;
bool should_send_promotion_in_new_plab_event() const;
bool should_send_promotion_outside_plab_event() const;
void send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured,
size_t plab_size) const;
void send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured) const;
};
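// As a usage sketch of the contract above, the guarded-call pattern from
// psPromotionManager.inline.hpp earlier in this diff applies it directly:
// should_report_* is the cheap filter, and the report_* call converts the
// object size from words to bytes before sending. Names are as in that file.
if (gc_tracer->should_report_promotion_in_new_plab_event()) {
  size_t obj_bytes = obj_size * HeapWordSize;  // words -> bytes, per the contract above
  gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
                                                age, tenured, lab->capacity());
}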
class OldGCTracer : public GCTracer {

View File

@ -111,6 +111,44 @@ void YoungGCTracer::send_young_gc_event() const {
}
}
bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
return EventPromoteObjectInNewPLAB::is_enabled();
}
bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
return EventPromoteObjectOutsidePLAB::is_enabled();
}
void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured,
size_t plab_size) const {
EventPromoteObjectInNewPLAB event;
if (event.should_commit()) {
event.set_gcId(_shared_gc_info.gc_id().id());
event.set_class(klass);
event.set_objectSize(obj_size);
event.set_tenured(tenured);
event.set_tenuringAge(age);
event.set_plabSize(plab_size);
event.commit();
}
}
void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
uint age, bool tenured) const {
EventPromoteObjectOutsidePLAB event;
if (event.should_commit()) {
event.set_gcId(_shared_gc_info.gc_id().id());
event.set_class(klass);
event.set_objectSize(obj_size);
event.set_tenured(tenured);
event.set_tenuringAge(age);
event.commit();
}
}
void OldGCTracer::send_old_gc_event() const {
EventGCOldGarbageCollection e(UNTIMED);
if (e.should_commit()) {

View File

@ -63,9 +63,7 @@ void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
}
void MutableSpace::pretouch_pages(MemRegion mr) {
for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
char t = *p; *p = t;
}
os::pretouch_memory((char*)mr.start(), (char*)mr.end());
}
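// For context, os::pretouch_memory centralizes what the deleted loop above
// did inline. A minimal standalone sketch of the technique (not HotSpot
// code), assuming a 4096-byte page where HotSpot asks os::vm_page_size():
#include <cstddef>

static void pretouch_sketch(char* start, char* end) {
  const size_t page_size = 4096;  // assumed page size
  for (volatile char* p = start; p < end; p += page_size) {
    char t = *p;  // fault the page in
    *p = t;       // dirty it so copy-on-write/zero pages get committed too
  }
}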
void MutableSpace::initialize(MemRegion mr,

View File

@ -385,6 +385,22 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
int handler_bci;
int current_bci = bci(thread);
if (thread->frames_to_pop_failed_realloc() > 0) {
// Allocation of scalar replaced object used in this frame
// failed. Unconditionally pop the frame.
thread->dec_frames_to_pop_failed_realloc();
thread->set_vm_result(h_exception());
// If the method is synchronized we already unlocked the monitor
// during deoptimization so the interpreter needs to skip it when
// the frame is popped.
thread->set_do_not_unlock_if_synchronized(true);
#ifdef CC_INTERP
return (address) -1;
#else
return Interpreter::remove_activation_entry();
#endif
}
// Need to do this check first since when _do_not_unlock_if_synchronized
// is set, we don't want to trigger any classloading which may make calls
// into java, or surprisingly find a matching exception handler for bci 0

View File

@ -0,0 +1,360 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardGeneration.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/generationSpec.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/genRemSet.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "memory/space.inline.hpp"
#include "runtime/java.hpp"
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
int level,
GenRemSet* remset) :
Generation(rs, initial_byte_size, level), _rs(remset),
_shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
_used_at_prologue()
{
HeapWord* start = (HeapWord*)rs.base();
size_t reserved_byte_size = rs.size();
assert((uintptr_t(start) & 3) == 0, "bad alignment");
assert((reserved_byte_size & 3) == 0, "bad alignment");
MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
_bts = new BlockOffsetSharedArray(reserved_mr,
heap_word_size(initial_byte_size));
MemRegion committed_mr(start, heap_word_size(initial_byte_size));
_rs->resize_covered_region(committed_mr);
if (_bts == NULL) {
vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
}
// Verify that the start and end of this generation are each the start of a
// card. If this were not true, a single card could span more than one
// generation, which would cause problems when we commit/uncommit memory,
// and when we clear and dirty cards.
guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
// Don't check at the very end of the heap as we'll assert that we're probing off
// the end if we try.
guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
}
_min_heap_delta_bytes = MinHeapDeltaBytes;
_capacity_at_prologue = initial_byte_size;
_used_at_prologue = 0;
}
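// The guarantee() calls above enforce card alignment of generation
// boundaries. A standalone illustration (not HotSpot code), assuming
// HotSpot's usual 512-byte card size:
#include <cassert>
#include <cstddef>
#include <cstdint>

static bool is_card_aligned(uintptr_t addr, size_t card_size = 512) {
  // Aligned means a multiple of the card size, so no single card can
  // cover words from two different generations.
  return (addr & (card_size - 1)) == 0;
}

int main() {
  assert(is_card_aligned(0x100000));       // 1 MiB boundary: aligned
  assert(!is_card_aligned(0x100000 + 8));  // mid-card boundary: rejected
  return 0;
}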
bool CardGeneration::grow_by(size_t bytes) {
assert_correct_size_change_locking();
bool result = _virtual_space.expand_by(bytes);
if (result) {
size_t new_word_size =
heap_word_size(_virtual_space.committed_size());
MemRegion mr(space()->bottom(), new_word_size);
// Expand card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
// Expand shared block offset array
_bts->resize(new_word_size);
// Fix for bug #4668531
if (ZapUnusedHeapArea) {
MemRegion mangle_region(space()->end(),
(HeapWord*)_virtual_space.high());
SpaceMangler::mangle_region(mangle_region);
}
// Expand space -- also expands space's BOT
// (which uses (part of) shared array above)
space()->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
update_counters();
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size - bytes;
gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, bytes/K, new_mem_size/K);
}
}
return result;
}
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
assert_locked_or_safepoint(Heap_lock);
if (bytes == 0) {
return true; // That's what grow_by(0) would return
}
size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
if (aligned_bytes == 0) {
// The alignment caused the number of bytes to wrap. An expand_by(0) will
// return true with the implication that an expansion was done when it
// was not. A call to expand implies a best effort to expand by "bytes"
// but not a guarantee. Align down to give a best effort. This is likely
// the most that the generation can expand since it has some capacity to
// start with.
aligned_bytes = ReservedSpace::page_align_size_down(bytes);
}
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
bool success = false;
if (aligned_expand_bytes > aligned_bytes) {
success = grow_by(aligned_expand_bytes);
}
if (!success) {
success = grow_by(aligned_bytes);
}
if (!success) {
success = grow_to_reserved();
}
if (PrintGC && Verbose) {
if (success && GC_locker::is_active_and_needs_gc()) {
gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
}
}
return success;
}
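// The wrap that expand() guards against is easy to reproduce standalone
// (not HotSpot code); the page size and the near-SIZE_MAX request are
// assumptions for illustration:
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t page = 4096;
  size_t bytes = SIZE_MAX - 100;                    // pathological request
  size_t up    = (bytes + page - 1) & ~(page - 1);  // align up: wraps to 0
  size_t down  = bytes & ~(page - 1);               // align down: still huge
  assert(up == 0 && down != 0);                     // hence the fallback above
  return 0;
}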
bool CardGeneration::grow_to_reserved() {
assert_correct_size_change_locking();
bool success = true;
const size_t remaining_bytes = _virtual_space.uncommitted_size();
if (remaining_bytes > 0) {
success = grow_by(remaining_bytes);
DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
}
return success;
}
void CardGeneration::shrink(size_t bytes) {
assert_correct_size_change_locking();
size_t size = ReservedSpace::page_align_size_down(bytes);
if (size == 0) {
return;
}
// Shrink committed space
_virtual_space.shrink_by(size);
// Shrink space; this also shrinks the space's BOT
space()->set_end((HeapWord*) _virtual_space.high());
size_t new_word_size = heap_word_size(space()->capacity());
// Shrink the shared block offset array
_bts->resize(new_word_size);
MemRegion mr(space()->bottom(), new_word_size);
// Shrink the card table
Universe::heap()->barrier_set()->resize_covered_region(mr);
if (Verbose && PrintGC) {
size_t new_mem_size = _virtual_space.committed_size();
size_t old_mem_size = new_mem_size + size;
gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
name(), old_mem_size/K, new_mem_size/K);
}
}
// No young generation references; clear this generation's cards.
void CardGeneration::clear_remembered_set() {
_rs->clear(reserved());
}
// Objects in this generation may have moved; invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
_rs->invalidate(used_region());
}
void CardGeneration::compute_new_size() {
assert(_shrink_factor <= 100, "invalid shrink factor");
size_t current_shrink_factor = _shrink_factor;
_shrink_factor = 0;
// We don't have floating point command-line arguments
// Note: argument processing ensures that MinHeapFreeRatio < 100.
const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
const double maximum_used_percentage = 1.0 - minimum_free_percentage;
// Compute some numbers about the state of the heap.
const size_t used_after_gc = used();
const size_t capacity_after_gc = capacity();
const double min_tmp = used_after_gc / maximum_used_percentage;
size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
// Don't shrink less than the initial generation size
minimum_desired_capacity = MAX2(minimum_desired_capacity,
spec()->init_size());
assert(used_after_gc <= minimum_desired_capacity, "sanity check");
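// Worked example (assumed numbers): with MinHeapFreeRatio = 40 the
// maximum_used_percentage is 0.60, so used_after_gc = 60K gives a
// minimum_desired_capacity of 60K / 0.60 = 100K -- the smallest capacity
// that still keeps 40% of the generation free.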
if (PrintGC && Verbose) {
const size_t free_after_gc = free();
const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
gclog_or_tty->print_cr(" "
" minimum_free_percentage: %6.2f"
" maximum_used_percentage: %6.2f",
minimum_free_percentage,
maximum_used_percentage);
gclog_or_tty->print_cr(" "
" free_after_gc : %6.1fK"
" used_after_gc : %6.1fK"
" capacity_after_gc : %6.1fK",
free_after_gc / (double) K,
used_after_gc / (double) K,
capacity_after_gc / (double) K);
gclog_or_tty->print_cr(" "
" free_percentage: %6.2f",
free_percentage);
}
if (capacity_after_gc < minimum_desired_capacity) {
// If we have less free space than we want then expand
size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
// Don't expand unless it's significant
if (expand_bytes >= _min_heap_delta_bytes) {
expand(expand_bytes, 0); // safe if expansion fails
}
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" expanding:"
" minimum_desired_capacity: %6.1fK"
" expand_bytes: %6.1fK"
" _min_heap_delta_bytes: %6.1fK",
minimum_desired_capacity / (double) K,
expand_bytes / (double) K,
_min_heap_delta_bytes / (double) K);
}
return;
}
// No expansion, now see if we want to shrink
size_t shrink_bytes = 0;
// We would never want to shrink more than this
size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
if (MaxHeapFreeRatio < 100) {
const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
const double minimum_used_percentage = 1.0 - maximum_free_percentage;
const double max_tmp = used_after_gc / minimum_used_percentage;
size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
maximum_desired_capacity = MAX2(maximum_desired_capacity,
spec()->init_size());
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" maximum_free_percentage: %6.2f"
" minimum_used_percentage: %6.2f",
maximum_free_percentage,
minimum_used_percentage);
gclog_or_tty->print_cr(" "
" _capacity_at_prologue: %6.1fK"
" minimum_desired_capacity: %6.1fK"
" maximum_desired_capacity: %6.1fK",
_capacity_at_prologue / (double) K,
minimum_desired_capacity / (double) K,
maximum_desired_capacity / (double) K);
}
assert(minimum_desired_capacity <= maximum_desired_capacity,
"sanity check");
if (capacity_after_gc > maximum_desired_capacity) {
// Capacity too large, compute shrinking size
shrink_bytes = capacity_after_gc - maximum_desired_capacity;
// We don't want to shrink all the way back to initSize if people call
// System.gc(), because some programs do that between "phases" and then
// we'd just have to grow the heap up again for the next phase. So we
// damp the shrinking: 0% on the first call, 10% on the second call, 40%
// on the third call, and 100% by the fourth call. But if we recompute
// size without shrinking, it goes back to 0%.
shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
if (current_shrink_factor == 0) {
_shrink_factor = 10;
} else {
_shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
}
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" shrinking:"
" initSize: %.1fK"
" maximum_desired_capacity: %.1fK",
spec()->init_size() / (double) K,
maximum_desired_capacity / (double) K);
gclog_or_tty->print_cr(" "
" shrink_bytes: %.1fK"
" current_shrink_factor: " SIZE_FORMAT
" new shrink factor: " SIZE_FORMAT
" _min_heap_delta_bytes: %.1fK",
shrink_bytes / (double) K,
current_shrink_factor,
_shrink_factor,
_min_heap_delta_bytes / (double) K);
}
}
}
if (capacity_after_gc > _capacity_at_prologue) {
// We might have expanded for promotions, in which case we might want to
// take back that expansion if there's room after GC. That keeps us from
// stretching the heap with promotions when there's plenty of room.
size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
// We have two shrinking computations; take the larger
shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
if (PrintGC && Verbose) {
gclog_or_tty->print_cr(" "
" aggressive shrinking:"
" _capacity_at_prologue: %.1fK"
" capacity_after_gc: %.1fK"
" expansion_for_promotion: %.1fK"
" shrink_bytes: %.1fK",
capacity_after_gc / (double) K,
_capacity_at_prologue / (double) K,
expansion_for_promotion / (double) K,
shrink_bytes / (double) K);
}
}
// Don't shrink unless it's significant
if (shrink_bytes >= _min_heap_delta_bytes) {
shrink(shrink_bytes);
}
}
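// The damping schedule described above (0%, 10%, 40%, 100% across
// successive calls) falls out of the _shrink_factor update. A standalone
// walkthrough (not HotSpot code) with an assumed candidate size:
#include <cstdio>
#include <cstddef>

int main() {
  const size_t candidate = 800 * 1024;  // assumed shrinkable bytes
  size_t factor = 0;                    // _shrink_factor starts at 0
  for (int call = 1; call <= 4; call++) {
    size_t applied = candidate / 100 * factor;  // same order as the real code
    printf("call %d: factor %3zu%% -> shrink %zuK\n", call, factor, applied / 1024);
    factor = (factor == 0) ? 10
                           : (factor * 4 < 100 ? factor * 4 : 100);  // MIN2(f * 4, 100)
  }
  return 0;  // prints 0K, then 80K, 320K, 800K
}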
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
void CardGeneration::space_iterate(SpaceClosure* blk,
bool usedOnly) {
blk->do_space(space());
}
void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
blk->set_generation(this);
younger_refs_in_space_iterate(space(), blk);
blk->reset_generation();
}

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_CARDGENERATION_HPP
#define SHARE_VM_MEMORY_CARDGENERATION_HPP
// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.
#include "memory/generation.hpp"
class BlockOffsetSharedArray;
class CompactibleSpace;
class CardGeneration: public Generation {
friend class VMStructs;
protected:
// This is shared with other generations.
GenRemSet* _rs;
// This is local to this generation.
BlockOffsetSharedArray* _bts;
// Current shrinking effect: this damps shrinking when the heap gets empty.
size_t _shrink_factor;
size_t _min_heap_delta_bytes; // Minimum amount to expand.
// Some statistics from before gc started.
// These are gathered in the gc_prologue (and should_collect)
// to control growing/shrinking policy in spite of promotions.
size_t _capacity_at_prologue;
size_t _used_at_prologue;
CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
GenRemSet* remset);
virtual void assert_correct_size_change_locking() = 0;
virtual CompactibleSpace* space() const = 0;
public:
// Attempt to expand the generation by "bytes". Expand by at least
// "expand_bytes" if possible. Return true if some amount (not
// necessarily the full "bytes") was expanded.
virtual bool expand(size_t bytes, size_t expand_bytes);
// Shrink generation with specified size
virtual void shrink(size_t bytes);
virtual void compute_new_size();
virtual void clear_remembered_set();
virtual void invalidate_remembered_set();
virtual void prepare_for_verify();
// Grow generation with specified size (returns false if unable to grow)
bool grow_by(size_t bytes);
// Grow generation to reserved size.
bool grow_to_reserved();
size_t capacity() const;
size_t used() const;
size_t free() const;
MemRegion used_region() const;
void space_iterate(SpaceClosure* blk, bool usedOnly = false);
void younger_refs_iterate(OopsInGenClosure* blk);
bool is_in(const void* p) const;
CompactibleSpace* first_compaction_space() const;
};
#endif // SHARE_VM_MEMORY_CARDGENERATION_HPP
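// The "card-size block-offset array" named in the header comment can be
// sketched in miniature (not HotSpot code). This toy stores per-card
// back-distances directly, assuming 64-word (512-byte) cards; the real
// BlockOffsetSharedArray encodes long distances logarithmically and has
// extra handling for lookups inside a block's first card.
#include <cstddef>
#include <vector>

struct ToyBOT {
  static const size_t card_words = 64;  // assumed card size in words
  std::vector<size_t> back_offset;      // back_offset[c]: words from the start
                                        // of card c back to its enclosing block
  explicit ToyBOT(size_t heap_words)
    : back_offset(heap_words / card_words + 1, 0) {}

  // Record a block [start, start + len), all in word indices.
  void record_block(size_t start, size_t len) {
    for (size_t c = start / card_words + 1; c * card_words < start + len; c++) {
      back_offset[c] = c * card_words - start;
    }
  }

  // block_start for an address beyond the block's first card: back up from
  // the card boundary by the stored offset instead of scanning the heap.
  size_t block_start(size_t addr) const {
    size_t c = addr / card_words;
    return c * card_words - back_offset[c];
  }
};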

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
#define SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
#include "memory/cardGeneration.hpp"
#include "memory/space.hpp"
inline size_t CardGeneration::capacity() const {
return space()->capacity();
}
inline size_t CardGeneration::used() const {
return space()->used();
}
inline size_t CardGeneration::free() const {
return space()->free();
}
inline MemRegion CardGeneration::used_region() const {
return space()->used_region();
}
inline bool CardGeneration::is_in(const void* p) const {
return space()->is_in(p);
}
inline CompactibleSpace* CardGeneration::first_compaction_space() const {
return space();
}
#endif // SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP

View File

@ -275,29 +275,26 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
// the new_end_aligned does not intrude onto the committed
// space of another region.
int ri = 0;
for (ri = 0; ri < _cur_covered_regions; ri++) {
if (ri != ind) {
if (_committed[ri].contains(new_end_aligned)) {
// The prior check included in the assert
// (new_end_aligned >= _committed[ri].start())
// is redundant with the "contains" test.
// Any region containing the new end
// should start at or beyond the region found (ind)
// for the new end (committed regions are not expected to
// be proper subsets of other committed regions).
assert(_committed[ri].start() >= _committed[ind].start(),
"New end of committed region is inconsistent");
new_end_aligned = _committed[ri].start();
// new_end_aligned can be equal to the start of its
// committed region (i.e., of "ind") if a second
// region following "ind" also start at the same location
// as "ind".
assert(new_end_aligned >= _committed[ind].start(),
"New end of committed region is before start");
debug_only(collided = true;)
// Should only collide with 1 region
break;
}
for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
if (new_end_aligned > _committed[ri].start()) {
assert(new_end_aligned <= _committed[ri].end(),
"An earlier committed region can't cover a later committed region");
// Any region containing the new end
// should start at or beyond the region found (ind)
// for the new end (committed regions are not expected to
// be proper subsets of other committed regions).
assert(_committed[ri].start() >= _committed[ind].start(),
"New end of committed region is inconsistent");
new_end_aligned = _committed[ri].start();
// new_end_aligned can be equal to the start of its
// committed region (i.e., of "ind") if a second
// region following "ind" also start at the same location
// as "ind".
assert(new_end_aligned >= _committed[ind].start(),
"New end of committed region is before start");
debug_only(collided = true;)
// Should only collide with 1 region
break;
}
}
#ifdef ASSERT

View File

@ -33,24 +33,13 @@
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS
CardTableRS::CardTableRS(MemRegion whole_heap) :
GenRemSet(),
_cur_youngergen_card_val(youngergenP1_card)
{
#if INCLUDE_ALL_GCS
if (UseG1GC) {
_ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
} else {
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
}
#else
guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
_ct_bs = new CardTableModRefBSForCTRS(whole_heap);
#endif
_ct_bs->initialize();
set_bs(_ct_bs);
_last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,

View File

@ -98,11 +98,11 @@ void FileMapInfo::fail_continue(const char *msg, ...) {
tty->print_cr("UseSharedSpaces: %s", msg);
}
}
UseSharedSpaces = false;
assert(current_info() != NULL, "singleton must be registered");
current_info()->close();
}
va_end(ap);
UseSharedSpaces = false;
assert(current_info() != NULL, "singleton must be registered");
current_info()->close();
}
// Fill in the fileMapInfo structure with data about this VM instance.

View File

@ -70,6 +70,7 @@ enum GCH_strong_roots_tasks {
GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
SharedHeap(policy),
_rem_set(NULL),
_gen_policy(policy),
_gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
_full_collections_completed(0)
@ -465,7 +466,7 @@ void GenCollectedHeap::do_collection(bool full,
// atomic wrt other collectors in this configuration, we
// are guaranteed to have empty discovered ref lists.
if (rp->discovery_is_atomic()) {
rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
rp->enable_discovery();
rp->setup_policy(do_clear_all_soft_refs);
} else {
// collect() below will enable discovery as appropriate

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -66,6 +66,9 @@ public:
Generation* _gens[max_gens];
GenerationSpec** _gen_specs;
// The singleton Gen Remembered Set.
GenRemSet* _rem_set;
// The generational collector policy.
GenCollectorPolicy* _gen_policy;
@ -383,6 +386,10 @@ public:
return _n_gens;
}
// This function returns the "GenRemSet" object that allows us to scan
// generations in a fully generational heap.
GenRemSet* rem_set() { return _rem_set; }
// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
static GenCollectedHeap* heap();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,7 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {
_gen_boundary = _gen->reserved().start();
// Barrier set for the heap, must be set after heap is initialized
if (_rs == NULL) {
GenRemSet* rs = SharedHeap::heap()->rem_set();
GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
_rs = (CardTableRS*)rs;
}
}

Some files were not shown because too many files have changed in this diff.