Erik Trimble 2010-07-15 19:52:58 -07:00
commit 89ddf727ca
53 changed files with 778 additions and 1364 deletions

View File

@ -35,7 +35,6 @@ import sun.jvm.hotspot.utilities.*;
public class NMethod extends CodeBlob {
private static long pcDescSize;
private static CIntegerField zombieInstructionSizeField;
private static sun.jvm.hotspot.types.OopField methodField;
/** != InvocationEntryBci if this nmethod is an on-stack replacement method */
private static CIntegerField entryBCIField;
@ -88,7 +87,6 @@ public class NMethod extends CodeBlob {
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("nmethod");
zombieInstructionSizeField = type.getCIntegerField("_zombie_instruction_size");
methodField = type.getOopField("_method");
entryBCIField = type.getCIntegerField("_entry_bci");
osrLinkField = type.getAddressField("_osr_link");

View File

@ -40,6 +40,9 @@ GENERATED = $(TOPDIR)/../generated
# tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# TODO: if it's a modules image, check if SA module is installed.
MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
# gnumake 3.78.1 does not accept the *s that
# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
@ -65,7 +68,7 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \
fi
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
echo ""; \
exit 1; \

View File

@ -36,6 +36,9 @@ GENERATED = ../generated
# tools.jar is needed by the JDI - SA binding
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# TODO: if it's a modules image, check if SA module is installed.
MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
# gnumake 3.78.1 does not accept the *s that
# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
@ -59,7 +62,7 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
exit 1; \
fi
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \
$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
echo ""; \
exit 1; \

View File

@ -376,10 +376,17 @@ public:
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
static bool supports_processor_topology() {
return (_cpuid_info.std_max_function >= 0xB) &&
// eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
// Some cpus have max cpuid >= 0xB but do not support processor topology.
((_cpuid_info.tpl_cpuidB0_eax & 0x1f | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
}
static uint cores_per_cpu() {
uint result = 1;
if (is_intel()) {
if (_cpuid_info.std_max_function >= 0xB) {
if (supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
_cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else {
@ -393,7 +400,7 @@ public:
static uint threads_per_core() {
uint result = 1;
if (is_intel() && _cpuid_info.std_max_function >= 0xB) {
if (is_intel() && supports_processor_topology()) {
result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
} else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /

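The guard added above covers CPUs that report std_max_function >= 0xB but leave topology leaf 0xB unimplemented. A minimal stand-alone sketch of the same validity test, with a hypothetical cpuid_count wrapper (HotSpot reads its cached _cpuid_info fields instead):

#include <cstdint>

struct LeafB { uint32_t eax, ebx, ecx, edx; };

// Hypothetical raw-CPUID wrapper; on GCC/Clang it could be implemented
// with __get_cpuid_count from <cpuid.h>.
LeafB cpuid_count(uint32_t leaf, uint32_t subleaf);

bool supports_processor_topology(uint32_t std_max_function) {
  if (std_max_function < 0xB) return false;
  LeafB sub0 = cpuid_count(0xB, 0);
  // At subleaf 0, eax[4:0] holds the APIC-ID shift and ebx[15:0] the
  // logical processor count; both zero marks the level as invalid.
  return ((sub0.eax & 0x1f) | (sub0.ebx & 0xffff)) != 0;
}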
View File

@ -2079,9 +2079,9 @@ void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint len) {
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (len < MAXPATHLEN) {
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
@ -2117,6 +2117,9 @@ void os::jvm_path(char *buf, jint len) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != NULL && java_home_var[0] != 0) {
char* jrelib_p;
int len;
// Check the current module name "libjvm.so" or "libjvm_g.so".
p = strrchr(buf, '/');
assert(strstr(p, "/libjvm") == p, "invalid library name");
@ -2124,14 +2127,24 @@ void os::jvm_path(char *buf, jint len) {
if (realpath(java_home_var, buf) == NULL)
return;
sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
// determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
}
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm[_g].so" instead of
// "libjvm"debug_only("_g")".so" since for fastdebug version
// we should have "libjvm.so" but debug_only("_g") adds "_g"!
// It is used when we are choosing the HPI library's name
// "libhpi[_g].so" in hpi::initialize_get_interface().
sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
len = strlen(buf);
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
} else {
// Go back to path of .so
if (realpath(dli_fname, buf) == NULL)

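Both jvm_path changes replace unbounded sprintf appends with snprintf bounded by the caller's buffer, and remember where the appended tail begins so the modules-image fallback can overwrite that tail in place. The pattern in isolation (try_subdir is a hypothetical helper, not HotSpot code):

#include <cstdio>
#include <cstring>
#include <unistd.h>

// Append subpath at offset tail of buf, bounded by buflen, and report
// whether the resulting path exists.
static bool try_subdir(char* buf, size_t buflen, size_t tail, const char* subpath) {
  snprintf(buf + tail, buflen - tail, "%s", subpath);
  return access(buf, F_OK) == 0;
}

// Usage mirroring the hunk: probe the legacy layout first, then reuse the
// same tail position for the modules-image layout.
//   size_t tail = strlen(buf);
//   if (!try_subdir(buf, buflen, tail, "/jre/lib/amd64"))
//     try_subdir(buf, buflen, tail, "/lib/amd64");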
View File

@ -1,45 +0,0 @@
/*
* Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_vtune_linux.cpp.incl"
// empty implementation
void VTune::start_GC() {}
void VTune::end_GC() {}
void VTune::start_class_load() {}
void VTune::end_class_load() {}
void VTune::exit() {}
void VTune::register_stub(const char* name, address start, address end) {}
void VTune::create_nmethod(nmethod* nm) {}
void VTune::delete_nmethod(nmethod* nm) {}
void vtune_init() {}
// Reconciliation History
// vtune_solaris.cpp 1.8 99/07/12 23:54:21
// End

View File

@ -2435,6 +2435,8 @@ void os::jvm_path(char *buf, jint buflen) {
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != NULL && java_home_var[0] != 0) {
char cpu_arch[12];
char* jrelib_p;
int len;
sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
// If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
@ -2450,14 +2452,23 @@ void os::jvm_path(char *buf, jint buflen) {
p = strstr(p, "_g") ? "_g" : "";
realpath(java_home_var, buf);
sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
// determine if this is a legacy image or modules image
// modules image doesn't have "jre" subdirectory
len = strlen(buf);
jrelib_p = buf + len;
snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
if (0 != access(buf, F_OK)) {
snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
}
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm[_g].so" instead of
// "libjvm"debug_only("_g")".so" since for fastdebug version
// we should have "libjvm.so" but debug_only("_g") adds "_g"!
// It is used when we are choosing the HPI library's name
// "libhpi[_g].so" in hpi::initialize_get_interface().
sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
len = strlen(buf);
snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
} else {
// Go back to path of .so
realpath((char *)dlinfo.dli_fname, buf);

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_vtune_solaris.cpp.incl"
// empty implementation
void VTune::start_GC() {}
void VTune::end_GC() {}
void VTune::start_class_load() {}
void VTune::end_class_load() {}
void VTune::exit() {}
void VTune::register_stub(const char* name, address start, address end) {}
void VTune::create_nmethod(nmethod* nm) {}
void VTune::delete_nmethod(nmethod* nm) {}
void vtune_init() {}

View File

@ -1,290 +0,0 @@
/*
* Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"
#include "incls/_vtune_windows.cpp.incl"
static int current_method_ID = 0;
// ------------- iJITProf.h -------------------
// defined by Intel -- do not change
#include "windows.h"
extern "C" {
enum iJITP_Event {
ExceptionOccurred_S, // Java exception
ExceptionOccurred_IDS,
Shutdown, // VM exit
ThreadCreate, // threads
ThreadDestroy,
ThreadSwitch,
ClassLoadStart, // class loading
ClassLoadEnd,
GCStart, // GC
GCEnd,
NMethodCreate = 13, // nmethod creation
NMethodDelete
// rest of event types omitted (call profiling not supported yet)
};
// version number -- 0 if VTune not installed
int WINAPI iJitP_VersionNumber();
enum iJITP_ModeFlags {
NoNotification = 0x0, // don't call vtune
NotifyNMethodCreate = 0x1, // notify NMethod_Create
NotifyNMethodDelete = 0x2, // notify NMethod_Delete
NotifyMethodEnter = 0x4, // method entry
NotifyMethodExit = 0x8, // method exit
NotifyShutdown = 0x10, // VM exit
NotifyGC = 0x20, // GC
};
// call back function type
typedef void (WINAPI *ModeChangedFn)(iJITP_ModeFlags flags);
// ------------- VTune method interfaces ----------------------
typedef void (WINAPI *RegisterCallbackFn)(ModeChangedFn fn); // register callback
typedef int (WINAPI *NotifyEventFn)(iJITP_Event, void* event_data);
// specific event data structures
// data for NMethodCreate
struct VTuneObj { // base class for allocation
// (can't use CHeapObj -- has vtable ptr)
void* operator new(size_t size) { return os::malloc(size); }
void operator delete(void* p) { fatal("never delete VTune data"); }
};
struct LineNumberInfo : VTuneObj { // PC-to-line number mapping
unsigned long offset; // byte offset from start of method
unsigned long line_num; // corresponding line number
};
struct MethodLoadInfo : VTuneObj {
unsigned long methodID; // unique method ID
const char* name; // method name
unsigned long instr_start; // start address
unsigned long instr_size; // length in bytes
unsigned long line_number_size; // size of line number table
LineNumberInfo* line_number_table; // line number mapping
unsigned long classID; // unique class ID
char* class_file_name; // fully qualified class file name
char* source_file_name; // fully qualified source file name
MethodLoadInfo(nmethod* nm); // for real nmethods
MethodLoadInfo(const char* vm_name, address start, address end);
// for "nmethods" like stubs, interpreter, etc
};
// data for NMethodDelete
struct MethodInfo : VTuneObj {
unsigned long methodID; // unique method ID
unsigned long classID; // (added for convenience -- not part of Intel interface)
MethodInfo(methodOop m);
};
};
MethodInfo::MethodInfo(methodOop m) {
// just give it a new ID -- we're not compiling methods twice (usually)
// (and even if we did, one might want to see the two versions separately)
methodID = ++current_method_ID;
}
MethodLoadInfo::MethodLoadInfo(const char* vm_name, address start, address end) {
classID = 0;
methodID = ++current_method_ID;
name = vm_name;
instr_start = (unsigned long)start;
instr_size = end - start;
line_number_size = 0;
line_number_table = NULL;
class_file_name = source_file_name = "HotSpot JVM";
}
MethodLoadInfo::MethodLoadInfo(nmethod* nm) {
methodOop m = nm->method();
MethodInfo info(m);
classID = info.classID;
methodID = info.methodID;
name = strdup(m->name()->as_C_string());
instr_start = (unsigned long)nm->instructions_begin();
instr_size = nm->code_size();
line_number_size = 0;
line_number_table = NULL;
klassOop kl = m->method_holder();
char* class_name = Klass::cast(kl)->name()->as_C_string();
char* file_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + 1);
strcpy(file_name, class_name);
class_file_name = file_name;
char* src_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + strlen(".java") + 1);
strcpy(src_name, class_name);
strcat(src_name, ".java");
source_file_name = src_name;
}
// --------------------- DLL loading functions ------------------------
#define DLLNAME "iJitProf.dll"
static HINSTANCE load_lib(char* name) {
HINSTANCE lib = NULL;
HKEY hk;
// try to get VTune directory from the registry
if (RegOpenKey(HKEY_CURRENT_USER, "Software\\VB and VBA Program Settings\\VTune\\StartUp", &hk) == ERROR_SUCCESS) {
for (int i = 0; true; i++) {
char szName[MAX_PATH + 1];
char szVal [MAX_PATH + 1];
DWORD cbName, cbVal;
cbName = cbVal = MAX_PATH + 1;
if (RegEnumValue(hk, i, szName, &cbName, NULL, NULL, (LPBYTE)szVal, &cbVal) == ERROR_SUCCESS) {
// get VTune directory
if (!strcmp(szName, name)) {
char* p = szVal;
while (*p == ' ') p++; // trim
char* q = p + strlen(p) - 1;
while (*q == ' ') *(q--) = '\0';
// chdir to the VTune dir
GetCurrentDirectory(MAX_PATH + 1, szName);
SetCurrentDirectory(p);
// load lib
lib = LoadLibrary(strcat(strcat(p, "\\"), DLLNAME));
if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s", p);
// restore current dir
SetCurrentDirectory(szName);
break;
}
} else {
break;
}
}
}
return lib;
}
static RegisterCallbackFn iJIT_RegisterCallback = NULL;
static NotifyEventFn iJIT_NotifyEvent = NULL;
static bool load_iJIT_funcs() {
// first try to load from PATH
HINSTANCE lib = LoadLibrary(DLLNAME);
if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s via PATH", DLLNAME);
// if not successful, try to look in the VTUNE directory
if (lib == NULL) lib = load_lib("VTUNEDIR30");
if (lib == NULL) lib = load_lib("VTUNEDIR25");
if (lib == NULL) lib = load_lib("VTUNEDIR");
if (lib == NULL) return false; // unsuccessful
// try to load the functions
iJIT_RegisterCallback = (RegisterCallbackFn)GetProcAddress(lib, "iJIT_RegisterCallback");
iJIT_NotifyEvent = (NotifyEventFn) GetProcAddress(lib, "iJIT_NotifyEvent");
if (!iJIT_RegisterCallback) tty->print_cr("*couldn't find VTune entry point iJIT_RegisterCallback");
if (!iJIT_NotifyEvent) tty->print_cr("*couldn't find VTune entry point iJIT_NotifyEvent");
return iJIT_RegisterCallback != NULL && iJIT_NotifyEvent != NULL;
}
// --------------------- VTune class ------------------------
static bool active = false;
static int flags = 0;
void VTune::start_GC() {
if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCStart, NULL);
}
void VTune::end_GC() {
if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCEnd, NULL);
}
void VTune::start_class_load() {
// not yet implemented in VTune
}
void VTune::end_class_load() {
// not yet implemented in VTune
}
void VTune::exit() {
if (active && (flags & NotifyShutdown)) iJIT_NotifyEvent(Shutdown, NULL);
}
void VTune::register_stub(const char* name, address start, address end) {
if (flags & NotifyNMethodCreate) {
MethodLoadInfo* info = new MethodLoadInfo(name, start, end);
if (PrintMiscellaneous && WizardMode && Verbose) {
tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID,
info->instr_start, info->instr_start + info->instr_size);
}
iJIT_NotifyEvent(NMethodCreate, info);
}
}
void VTune::create_nmethod(nmethod* nm) {
if (flags & NotifyNMethodCreate) {
MethodLoadInfo* info = new MethodLoadInfo(nm);
if (PrintMiscellaneous && WizardMode && Verbose) {
tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID,
info->instr_start, info->instr_start + info->instr_size);
}
iJIT_NotifyEvent(NMethodCreate, info);
}
}
void VTune::delete_nmethod(nmethod* nm) {
if (flags & NotifyNMethodDelete) {
MethodInfo* info = new MethodInfo(nm->method());
iJIT_NotifyEvent(NMethodDelete, info);
}
}
static void set_flags(int new_flags) {
flags = new_flags;
// if (WizardMode) tty->print_cr("*new VTune flags: %#x", flags);
}
void vtune_init() {
if (!UseVTune) return;
active = load_iJIT_funcs();
if (active) {
iJIT_RegisterCallback((ModeChangedFn)set_flags);
} else {
assert(flags == 0, "flags shouldn't be set");
}
}

View File

@ -106,7 +106,7 @@ public:
void BCEscapeAnalyzer::set_returned(ArgumentMap vars) {
for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i))
_arg_returned.set_bit(i);
_arg_returned.set(i);
}
_return_local = _return_local && !(vars.contains_unknown() || vars.contains_allocated());
_return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars());
@ -126,16 +126,16 @@ bool BCEscapeAnalyzer::is_arg_stack(ArgumentMap vars){
if (_conservative)
return true;
for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i) && _arg_stack.at(i))
if (vars.contains(i) && _arg_stack.test(i))
return true;
}
return false;
}
void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, BitMap &bm) {
void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
for (int i = 0; i < _arg_size; i++) {
if (vars.contains(i)) {
bm.clear_bit(i);
bm >>= i;
}
}
}
@ -1157,15 +1157,15 @@ void BCEscapeAnalyzer::initialize() {
ciSignature* sig = method()->signature();
int j = 0;
if (!method()->is_static()) {
_arg_local.set_bit(0);
_arg_stack.set_bit(0);
_arg_local.set(0);
_arg_stack.set(0);
j++;
}
for (i = 0; i < sig->count(); i++) {
ciType* t = sig->type_at(i);
if (!t->is_primitive_type()) {
_arg_local.set_bit(j);
_arg_stack.set_bit(j);
_arg_local.set(j);
_arg_stack.set(j);
}
j += t->size();
}
@ -1198,9 +1198,9 @@ void BCEscapeAnalyzer::clear_escape_info() {
set_modified(var, OFFSET_ANY, 4);
set_global_escape(var);
}
_arg_local.clear();
_arg_stack.clear();
_arg_returned.clear();
_arg_local.Clear();
_arg_stack.Clear();
_arg_returned.Clear();
_return_local = false;
_return_allocated = false;
_allocated_escapes = true;
@ -1254,7 +1254,7 @@ void BCEscapeAnalyzer::compute_escape_info() {
// Do not scan method if it has no object parameters and
// does not return an object (_return_allocated is set in initialize()).
if (_arg_local.is_empty() && !_return_allocated) {
if (_arg_local.Size() == 0 && !_return_allocated) {
// Clear all info since method's bytecode was not analysed and
// set pessimistic escape information.
clear_escape_info();
@ -1275,14 +1275,14 @@ void BCEscapeAnalyzer::compute_escape_info() {
//
if (!has_dependencies() && !methodData()->is_empty()) {
for (i = 0; i < _arg_size; i++) {
if (_arg_local.at(i)) {
assert(_arg_stack.at(i), "inconsistent escape info");
if (_arg_local.test(i)) {
assert(_arg_stack.test(i), "inconsistent escape info");
methodData()->set_arg_local(i);
methodData()->set_arg_stack(i);
} else if (_arg_stack.at(i)) {
} else if (_arg_stack.test(i)) {
methodData()->set_arg_stack(i);
}
if (_arg_returned.at(i)) {
if (_arg_returned.test(i)) {
methodData()->set_arg_returned(i);
}
methodData()->set_arg_modified(i, _arg_modified[i]);
@ -1308,9 +1308,12 @@ void BCEscapeAnalyzer::read_escape_info() {
// read escape information from method descriptor
for (int i = 0; i < _arg_size; i++) {
_arg_local.at_put(i, methodData()->is_arg_local(i));
_arg_stack.at_put(i, methodData()->is_arg_stack(i));
_arg_returned.at_put(i, methodData()->is_arg_returned(i));
if (methodData()->is_arg_local(i))
_arg_local.set(i);
if (methodData()->is_arg_stack(i))
_arg_stack.set(i);
if (methodData()->is_arg_returned(i))
_arg_returned.set(i);
_arg_modified[i] = methodData()->arg_modified(i);
}
_return_local = methodData()->eflag_set(methodDataOopDesc::return_local);
@ -1358,26 +1361,26 @@ void BCEscapeAnalyzer::dump() {
BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent)
: _conservative(method == NULL || !EstimateArgEscape)
, _arena(CURRENT_ENV->arena())
, _method(method)
, _methodData(method ? method->method_data() : NULL)
, _arg_size(method ? method->arg_size() : 0)
, _stack()
, _arg_local(_arg_size)
, _arg_stack(_arg_size)
, _arg_returned(_arg_size)
, _dirty(_arg_size)
, _arg_local(_arena)
, _arg_stack(_arena)
, _arg_returned(_arena)
, _dirty(_arena)
, _return_local(false)
, _return_allocated(false)
, _allocated_escapes(false)
, _unknown_modified(false)
, _dependencies()
, _dependencies(_arena, 4, 0, NULL)
, _parent(parent)
, _level(parent == NULL ? 0 : parent->level() + 1) {
if (!_conservative) {
_arg_local.clear();
_arg_stack.clear();
_arg_returned.clear();
_dirty.clear();
_arg_local.Clear();
_arg_stack.Clear();
_arg_returned.Clear();
_dirty.Clear();
Arena* arena = CURRENT_ENV->arena();
_arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint));
Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint));
@ -1414,8 +1417,8 @@ void BCEscapeAnalyzer::copy_dependencies(Dependencies *deps) {
deps->assert_evol_method(method());
}
for (int i = 0; i < _dependencies.length(); i+=2) {
ciKlass *k = _dependencies[i]->as_klass();
ciMethod *m = _dependencies[i+1]->as_method();
ciKlass *k = _dependencies.at(i)->as_klass();
ciMethod *m = _dependencies.at(i+1)->as_method();
deps->assert_unique_concrete_method(k, m);
}
}
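The mechanical translation in this file maps BitMap operations onto HotSpot's arena-backed VectorSet: set_bit becomes set, at becomes test, clear_bit becomes the removal operator >>=, and clear becomes Clear. A sketch of the substituted calls, all of which appear in the hunks above (VectorSet is VM-internal, so this only compiles inside HotSpot):

void vectorset_sketch(Arena* arena) {
  VectorSet vs(arena);
  vs.set(3);                       // insert element 3  (was bm.set_bit(3))
  bool present = vs.test(3);       // membership test   (was bm.at(3))
  vs >>= 3;                        // remove element 3  (was bm.clear_bit(3))
  vs.Clear();                      // drop all elements (was bm.clear())
  bool empty = (vs.Size() == 0);   // emptiness check   (was bm.is_empty())
  (void)present; (void)empty;
}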

View File

@ -22,9 +22,6 @@
*
*/
define_array(ciObjectArray, ciObject*);
define_stack(ciObjectList, ciObjectArray);
// This class implements a fast, conservative analysis of effect of methods
// on the escape state of their arguments. The analysis is at the bytecode
// level.
@ -34,18 +31,17 @@ class ciBlock;
class BCEscapeAnalyzer : public ResourceObj {
private:
Arena* _arena; // ciEnv arena
bool _conservative; // If true, return maximally
// conservative results.
ciMethod* _method;
ciMethodData* _methodData;
int _arg_size;
intStack _stack;
BitMap _arg_local;
BitMap _arg_stack;
BitMap _arg_returned;
BitMap _dirty;
VectorSet _arg_local;
VectorSet _arg_stack;
VectorSet _arg_returned;
VectorSet _dirty;
enum{ ARG_OFFSET_MAX = 31};
uint *_arg_modified;
@ -54,7 +50,7 @@ class BCEscapeAnalyzer : public ResourceObj {
bool _allocated_escapes;
bool _unknown_modified;
ciObjectList _dependencies;
GrowableArray<ciObject *> _dependencies;
ciMethodBlocks *_methodBlocks;
@ -68,20 +64,10 @@ class BCEscapeAnalyzer : public ResourceObj {
private:
// helper functions
bool is_argument(int i) { return i >= 0 && i < _arg_size; }
void raw_push(int i) { _stack.push(i); }
int raw_pop() { return _stack.is_empty() ? -1 : _stack.pop(); }
void apush(int i) { raw_push(i); }
void spush() { raw_push(-1); }
void lpush() { spush(); spush(); }
int apop() { return raw_pop(); }
void spop() { assert(_stack.is_empty() || _stack.top() == -1, ""); raw_pop(); }
void lpop() { spop(); spop(); }
void set_returned(ArgumentMap vars);
bool is_argument(ArgumentMap vars);
bool is_arg_stack(ArgumentMap vars);
void clear_bits(ArgumentMap vars, BitMap &bs);
void clear_bits(ArgumentMap vars, VectorSet &bs);
void set_method_escape(ArgumentMap vars);
void set_global_escape(ArgumentMap vars);
void set_dirty(ArgumentMap vars);
@ -116,25 +102,25 @@ class BCEscapeAnalyzer : public ResourceObj {
ciMethodData* methodData() const { return _methodData; }
BCEscapeAnalyzer* parent() const { return _parent; }
int level() const { return _level; }
ciObjectList* dependencies() { return &_dependencies; }
GrowableArray<ciObject *>* dependencies() { return &_dependencies; }
bool has_dependencies() const { return !_dependencies.is_empty(); }
// retrieval of interprocedural escape information
// The given argument does not escape the callee.
bool is_arg_local(int i) const {
return !_conservative && _arg_local.at(i);
return !_conservative && _arg_local.test(i);
}
// The given argument escapes the callee, but does not become globally
// reachable.
bool is_arg_stack(int i) const {
return !_conservative && _arg_stack.at(i);
return !_conservative && _arg_stack.test(i);
}
// The given argument does not escape globally, and may be returned.
bool is_arg_returned(int i) const {
return !_conservative && _arg_returned.at(i); }
return !_conservative && _arg_returned.test(i); }
// True iff only input arguments are returned.
bool is_return_local() const {

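The new _dependencies is a plain GrowableArray<ciObject*> holding (klass, method) pairs flattened into consecutive slots, which is why the .cpp iterates in steps of two and reads at(i) and at(i+1). The layout in generic terms, with std::vector standing in for GrowableArray:

#include <vector>

struct Obj {};  // stand-in for ciObject

// Pairs are stored flat: slot 2k holds the klass, slot 2k+1 the method.
void add_dependency(std::vector<Obj*>& deps, Obj* klass, Obj* method) {
  deps.push_back(klass);
  deps.push_back(method);
}

void walk(const std::vector<Obj*>& deps) {
  for (size_t i = 0; i < deps.size(); i += 2) {
    Obj* k = deps[i];      // matches _dependencies.at(i)->as_klass()
    Obj* m = deps[i + 1];  // matches _dependencies.at(i+1)->as_method()
    (void)k; (void)m;
  }
}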
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,9 +44,7 @@ ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
_flags = ciFlags(access_flags);
_has_finalizer = access_flags.has_finalizer();
_has_subklass = ik->subklass() != NULL;
_is_initialized = ik->is_initialized();
// Next line must follow and use the result of the previous line:
_is_linked = _is_initialized || ik->is_linked();
_init_state = (instanceKlass::ClassState)ik->get_init_state();
_nonstatic_field_size = ik->nonstatic_field_size();
_has_nonstatic_fields = ik->has_nonstatic_fields();
_nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
@ -91,8 +89,7 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
: ciKlass(name, ciInstanceKlassKlass::make())
{
assert(name->byte_at(0) != '[', "not an instance klass");
_is_initialized = false;
_is_linked = false;
_init_state = (instanceKlass::ClassState)0;
_nonstatic_field_size = -1;
_has_nonstatic_fields = false;
_nonstatic_fields = NULL;
@ -109,21 +106,10 @@ ciInstanceKlass::ciInstanceKlass(ciSymbol* name,
// ------------------------------------------------------------------
// ciInstanceKlass::compute_shared_is_initialized
bool ciInstanceKlass::compute_shared_is_initialized() {
void ciInstanceKlass::compute_shared_init_state() {
GUARDED_VM_ENTRY(
instanceKlass* ik = get_instanceKlass();
_is_initialized = ik->is_initialized();
return _is_initialized;
)
}
// ------------------------------------------------------------------
// ciInstanceKlass::compute_shared_is_linked
bool ciInstanceKlass::compute_shared_is_linked() {
GUARDED_VM_ENTRY(
instanceKlass* ik = get_instanceKlass();
_is_linked = ik->is_linked();
return _is_linked;
_init_state = (instanceKlass::ClassState)ik->get_init_state();
)
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,9 +39,8 @@ private:
jobject _loader;
jobject _protection_domain;
instanceKlass::ClassState _init_state; // state of class
bool _is_shared;
bool _is_initialized;
bool _is_linked;
bool _has_finalizer;
bool _has_subklass;
bool _has_nonstatic_fields;
@ -87,27 +86,34 @@ protected:
bool is_shared() { return _is_shared; }
bool compute_shared_is_initialized();
bool compute_shared_is_linked();
void compute_shared_init_state();
bool compute_shared_has_subklass();
int compute_shared_nof_implementors();
int compute_nonstatic_fields();
GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields);
// Update the init_state for shared klasses
void update_if_shared(instanceKlass::ClassState expected) {
if (_is_shared && _init_state != expected) {
if (is_loaded()) compute_shared_init_state();
}
}
public:
// Has this klass been initialized?
bool is_initialized() {
if (_is_shared && !_is_initialized) {
return is_loaded() && compute_shared_is_initialized();
}
return _is_initialized;
update_if_shared(instanceKlass::fully_initialized);
return _init_state == instanceKlass::fully_initialized;
}
// Is this klass being initialized?
bool is_being_initialized() {
update_if_shared(instanceKlass::being_initialized);
return _init_state == instanceKlass::being_initialized;
}
// Has this klass been linked?
bool is_linked() {
if (_is_shared && !_is_linked) {
return is_loaded() && compute_shared_is_linked();
}
return _is_linked;
update_if_shared(instanceKlass::linked);
return _init_state >= instanceKlass::linked;
}
// General klass information.
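Collapsing _is_initialized and _is_linked into one _init_state works because instanceKlass::ClassState is an ordered progression, so "linked" can be answered with >= and "initialized" with ==. A sketch with the ordering assumed from the hunk (the real enum has additional states at both ends):

enum ClassState { allocated, loaded, linked, being_initialized, fully_initialized };

bool is_linked(ClassState s)            { return s >= linked; }  // initialized implies linked
bool is_being_initialized(ClassState s) { return s == being_initialized; }
bool is_initialized(ClassState s)       { return s == fully_initialized; }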

View File

@ -54,10 +54,10 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_code = NULL;
_exception_handlers = NULL;
_liveness = NULL;
_bcea = NULL;
_method_blocks = NULL;
#ifdef COMPILER2
_flow = NULL;
_bcea = NULL;
#endif // COMPILER2
ciEnv *env = CURRENT_ENV;
@ -121,11 +121,11 @@ ciMethod::ciMethod(ciInstanceKlass* holder,
_intrinsic_id = vmIntrinsics::_none;
_liveness = NULL;
_can_be_statically_bound = false;
_bcea = NULL;
_method_blocks = NULL;
_method_data = NULL;
#ifdef COMPILER2
_flow = NULL;
_bcea = NULL;
#endif // COMPILER2
}
@ -1033,10 +1033,15 @@ bool ciMethod::is_accessor () const { FETCH_FLAG_FROM_VM(is_accessor)
bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); }
BCEscapeAnalyzer *ciMethod::get_bcea() {
#ifdef COMPILER2
if (_bcea == NULL) {
_bcea = new (CURRENT_ENV->arena()) BCEscapeAnalyzer(this, NULL);
}
return _bcea;
#else // COMPILER2
ShouldNotReachHere();
return NULL;
#endif // COMPILER2
}
ciMethodBlocks *ciMethod::get_method_blocks() {

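get_bcea above is lazy initialization: the analyzer is built on first use (in the ciEnv arena) and cached, while non-COMPILER2 builds fail loudly instead of returning a stale pointer. The caching idiom reduced to plain C++, with plain new standing in for arena allocation:

struct Analyzer { /* analysis state */ };

struct Method {
  Analyzer* _bcea = nullptr;
  Analyzer* get_bcea() {
    if (_bcea == nullptr)
      _bcea = new Analyzer();  // built once; HotSpot allocates in an arena
    return _bcea;
  }
};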
View File

@ -48,7 +48,6 @@ class ciMethod : public ciObject {
ciInstanceKlass* _holder;
ciSignature* _signature;
ciMethodData* _method_data;
BCEscapeAnalyzer* _bcea;
ciMethodBlocks* _method_blocks;
// Code attributes.
@ -72,7 +71,8 @@ class ciMethod : public ciObject {
// Optional liveness analyzer.
MethodLiveness* _liveness;
#ifdef COMPILER2
ciTypeFlow* _flow;
ciTypeFlow* _flow;
BCEscapeAnalyzer* _bcea;
#endif
ciMethod(methodHandle h_m);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -832,7 +832,6 @@ objArrayOop ClassLoader::get_system_packages(TRAPS) {
instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) {
VTuneClassLoadMarker clm;
ResourceMark rm(THREAD);
EventMark m("loading class " INTPTR_FORMAT, (address)h_name());
ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);

View File

@ -210,6 +210,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) AdapterBlob(size, cb);
CodeCache::commit(blob);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -281,7 +282,6 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub);
Disassembler::decode(stub->instructions_begin(), stub->instructions_end());
}
VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -356,7 +356,6 @@ DeoptimizationBlob* DeoptimizationBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
}
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -414,7 +413,6 @@ UncommonTrapBlob* UncommonTrapBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
}
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -474,7 +472,6 @@ ExceptionBlob* ExceptionBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
}
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {
@ -533,7 +530,6 @@ SafepointBlob* SafepointBlob::create(
tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob);
Disassembler::decode(blob->instructions_begin(), blob->instructions_end());
}
VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {

View File

@ -93,6 +93,8 @@ class CodeBlob_sizes {
CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
@ -176,8 +178,14 @@ void CodeCache::free(CodeBlob* cb) {
verify_if_often();
print_trace("free", cb);
if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
if (cb->is_nmethod()) {
_number_of_nmethods--;
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
}
}
if (cb->is_adapter_blob()) {
_number_of_adapters--;
}
_number_of_blobs--;
@ -191,9 +199,16 @@ void CodeCache::free(CodeBlob* cb) {
void CodeCache::commit(CodeBlob* cb) {
// this is called by nmethod::nmethod, which must already own CodeCache_lock
assert_locked_or_safepoint(CodeCache_lock);
if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies++;
if (cb->is_nmethod()) {
_number_of_nmethods++;
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies++;
}
}
if (cb->is_adapter_blob()) {
_number_of_adapters++;
}
// flush the hardware I-cache
ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
}
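commit() and free() now maintain the new per-kind counters symmetrically: whatever a blob increments when it enters the cache it decrements when it leaves, and both paths hold CodeCache_lock or run at a safepoint. The shape of the bookkeeping as a standalone sketch (not the HotSpot types):

struct CacheCounters {
  int nmethods = 0, nmethods_with_deps = 0, adapters = 0;

  void on_commit(bool is_nmethod, bool has_deps, bool is_adapter) {
    if (is_nmethod) { nmethods++; if (has_deps) nmethods_with_deps++; }
    if (is_adapter) adapters++;
  }
  // Mirror image of on_commit, so the counters cannot drift.
  void on_free(bool is_nmethod, bool has_deps, bool is_adapter) {
    if (is_nmethod) { nmethods--; if (has_deps) nmethods_with_deps--; }
    if (is_adapter) adapters--;
  }
};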

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,8 @@ class CodeCache : AllStatic {
// 4422213 or 4436291 for details.
static CodeHeap * _heap;
static int _number_of_blobs;
static int _number_of_adapters;
static int _number_of_nmethods;
static int _number_of_nmethods_with_dependencies;
static bool _needs_cache_clean;
static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
@ -105,6 +107,8 @@ class CodeCache : AllStatic {
static nmethod* first_nmethod();
static nmethod* next_nmethod (CodeBlob* cb);
static int nof_blobs() { return _number_of_blobs; }
static int nof_adapters() { return _number_of_adapters; }
static int nof_nmethods() { return _number_of_nmethods; }
// GC support
static void gc_epilogue();

View File

@ -397,11 +397,6 @@ void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, add
//-------------end of code for ExceptionCache--------------
void nmFlags::clear() {
assert(sizeof(nmFlags) == sizeof(int), "using more than one word for nmFlags");
*(jint*)this = 0;
}
int nmethod::total_size() const {
return
code_size() +
@ -419,8 +414,32 @@ const char* nmethod::compile_kind() const {
return NULL;
}
// %%% This variable is no longer used?
int nmethod::_zombie_instruction_size = NativeJump::instruction_size;
// Fill in default values for various flag fields
void nmethod::init_defaults() {
_state = alive;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_speculatively_disconnected = 0;
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_marked_for_deoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
_unload_reported = false; // jvmti state
NOT_PRODUCT(_has_debug_info = false);
_oops_do_mark_link = NULL;
_jmethod_id = NULL;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;
#ifdef HAVE_DTRACE_H
_trap_offset = 0;
#endif // def HAVE_DTRACE_H
}
nmethod* nmethod::new_native_nmethod(methodHandle method,
@ -580,25 +599,16 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
NOT_PRODUCT(_has_debug_info = false);
_oops_do_mark_link = NULL;
init_defaults();
_method = method;
_entry_bci = InvocationEntryBci;
_jmethod_id = NULL;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_saved_nmethod_link = NULL;
_compiler = NULL;
// We have no exception handler or deopt handler make the
// values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0;
_deoptimize_offset = 0;
_deoptimize_mh_offset = 0;
_orig_pc_offset = 0;
#ifdef HAVE_DTRACE_H
_trap_offset = 0;
#endif // def HAVE_DTRACE_H
_stub_offset = data_offset();
_consts_offset = data_offset();
_oops_offset = data_offset();
@ -616,17 +626,9 @@ nmethod::nmethod(
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
flags.clear();
flags.state = alive;
_markedForDeoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
code_buffer->copy_oops_to(this);
debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
VTune::create_nmethod(this);
}
if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@ -674,15 +676,9 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
NOT_PRODUCT(_has_debug_info = false);
_oops_do_mark_link = NULL;
init_defaults();
_method = method;
_entry_bci = InvocationEntryBci;
_jmethod_id = NULL;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_compiler = NULL;
// We have no exception handler or deopt handler make the
// values something that will never match a pc like the nmethod vtable entry
_exception_offset = 0;
@ -708,17 +704,9 @@ nmethod::nmethod(
_exception_cache = NULL;
_pc_desc_cache.reset_to(NULL);
flags.clear();
flags.state = alive;
_markedForDeoptimization = 0;
_lock_count = 0;
_stack_traversal_mark = 0;
code_buffer->copy_oops_to(this);
debug_only(verify_scavenge_root_oops());
CodeCache::commit(this);
VTune::create_nmethod(this);
}
if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) {
@ -783,21 +771,13 @@ nmethod::nmethod(
debug_only(No_Safepoint_Verifier nsv;)
assert_locked_or_safepoint(CodeCache_lock);
NOT_PRODUCT(_has_debug_info = false);
_oops_do_mark_link = NULL;
init_defaults();
_method = method;
_jmethod_id = NULL;
_entry_bci = entry_bci;
_compile_id = compile_id;
_comp_level = comp_level;
_entry_bci = entry_bci;
_osr_link = NULL;
_scavenge_root_link = NULL;
_scavenge_root_state = 0;
_compiler = compiler;
_orig_pc_offset = orig_pc_offset;
#ifdef HAVE_DTRACE_H
_trap_offset = 0;
#endif // def HAVE_DTRACE_H
_stub_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start());
// Exception handler and deopt handler are in the stub section
@ -824,15 +804,6 @@ nmethod::nmethod(
_exception_cache = NULL;
_pc_desc_cache.reset_to(scopes_pcs_begin());
flags.clear();
flags.state = alive;
_markedForDeoptimization = 0;
_unload_reported = false; // jvmti state
_lock_count = 0;
_stack_traversal_mark = 0;
// Copy contents of ScopeDescRecorder to nmethod
code_buffer->copy_oops_to(this);
debug_info->copy_to(this);
@ -844,8 +815,6 @@ nmethod::nmethod(
CodeCache::commit(this);
VTune::create_nmethod(this);
// Copy contents of ExceptionHandlerTable to nmethod
handler_table->copy_to(this);
nul_chk_table->copy_to(this);
@ -991,11 +960,6 @@ void nmethod::print_nmethod(bool printmethod) {
}
void nmethod::set_version(int v) {
flags.version = v;
}
// Promote one word from an assembly-time handle to a live embedded oop.
inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) {
if (handle == NULL ||
@ -1142,6 +1106,8 @@ void nmethod::cleanup_inline_caches() {
// This is a private interface with the sweeper.
void nmethod::mark_as_seen_on_stack() {
assert(is_not_entrant(), "must be a non-entrant method");
// Set the traversal mark to ensure that the sweeper does 2
// cleaning passes before moving to zombie.
set_stack_traversal_mark(NMethodSweeper::traversal_count());
}
@ -1210,7 +1176,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
// for later on.
CodeCache::set_needs_cache_clean(true);
}
flags.state = unloaded;
_state = unloaded;
// Log the unloading.
log_state_change();
@ -1236,21 +1202,21 @@ void nmethod::log_state_change() const {
if (LogCompilation) {
if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block
if (flags.state == unloaded) {
if (_state == unloaded) {
xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
os::current_thread_id());
} else {
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
os::current_thread_id(),
(flags.state == zombie ? " zombie='1'" : ""));
(_state == zombie ? " zombie='1'" : ""));
}
log_identity(xtty);
xtty->stamp();
xtty->end_elem();
}
}
if (PrintCompilation && flags.state != unloaded) {
print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant ");
if (PrintCompilation && _state != unloaded) {
print_on(tty, _state == zombie ? "made zombie " : "made not entrant ");
tty->cr();
}
}
@ -1261,8 +1227,9 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
bool was_alive = false;
// Make sure the nmethod is not flushed in case of a safepoint in code below.
// Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
nmethodLocker nml(this);
methodHandle the_method(method());
{
// If the method is already zombie there is nothing to do
@ -1282,7 +1249,7 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Enter critical section. Does not block for safepoint.
MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
if (flags.state == state) {
if (_state == state) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
return false;
@ -1293,17 +1260,37 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
if (!is_osr_method() && !is_not_entrant()) {
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
}
was_alive = is_in_use(); // Read state under lock
if (is_in_use()) {
// It's a true state change, so mark the method as decompiled.
// Do it only for transition from alive.
inc_decompile_count();
}
// Change state
flags.state = state;
_state = state;
// Log the transition once
log_state_change();
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in methodOop as seen in bugid 4947125.
// If the vep() points to the zombie nmethod, the memory for the nmethod
// could be flushed and the compiler and vtable stubs could still call
// through it.
if (method() != NULL && (method()->code() == this ||
method()->from_compiled_entry() == verified_entry_point())) {
HandleMark hm;
method()->clear_code();
}
if (state == not_entrant) {
mark_as_seen_on_stack();
}
} // leave critical region under Patching_lock
// When the nmethod becomes zombie it is no longer alive so the
@ -1311,18 +1298,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// state will be flushed later when the transition to zombie
// happens or they get unloaded.
if (state == zombie) {
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
post_compiled_method_unload();
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
flush_dependencies(NULL);
} else {
assert(state == not_entrant, "other cases may need to be handled differently");
}
if (state == not_entrant) {
Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
} else {
Events::log("Make nmethod zombie " INTPTR_FORMAT, this);
}
if (TraceCreateZombies) {
tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
}
@ -1330,47 +1316,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
// Make sweeper aware that there is a zombie method that needs to be removed
NMethodSweeper::notify(this);
// not_entrant only stuff
if (state == not_entrant) {
mark_as_seen_on_stack();
}
if (was_alive) {
// It's a true state change, so mark the method as decompiled.
// Do it only for transition from alive.
inc_decompile_count();
}
// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
// and it hasn't already been reported for this nmethod then report it now.
// (the event may have been reported earlier if the GC marked it for unloading).
if (state == zombie) {
post_compiled_method_unload();
}
// Zombie only stuff
if (state == zombie) {
VTune::delete_nmethod(this);
}
// Check whether method got unloaded at a safepoint before this,
// if so we can skip the flushing steps below
if (method() == NULL) return true;
// Remove nmethod from method.
// We need to check if both the _code and _from_compiled_code_entry_point
// refer to this nmethod because there is a race in setting these two fields
// in methodOop as seen in bugid 4947125.
// If the vep() points to the zombie nmethod, the memory for the nmethod
// could be flushed and the compiler and vtable stubs could still call
// through it.
if (method()->code() == this ||
method()->from_compiled_entry() == verified_entry_point()) {
HandleMark hm;
method()->clear_code();
}
return true;
}
@ -2109,7 +2054,6 @@ address nmethod::continuation_for_implicit_exception(address pc) {
void nmethod_init() {
// make sure you didn't forget to adjust the filler fields
assert(sizeof(nmFlags) <= 4, "nmFlags occupies more than a word");
assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
}
@ -2345,7 +2289,6 @@ void nmethod::print() const {
tty->print("((nmethod*) "INTPTR_FORMAT ") ", this);
tty->print(" for method " INTPTR_FORMAT , (address)method());
tty->print(" { ");
if (version()) tty->print("v%d ", version());
if (is_in_use()) tty->print("in_use ");
if (is_not_entrant()) tty->print("not_entrant ");
if (is_zombie()) tty->print("zombie ");

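The restructured make_not_entrant_or_zombie performs the check-and-set, the method()->clear_code() unlink, and the not_entrant bookkeeping inside one critical section under Patching_lock, so a competing thread either wins the transition or observes it already completed. Its core shape, reduced to a sketch with std::mutex standing in for HotSpot's lock types:

#include <mutex>

enum State : unsigned char { alive, not_entrant, zombie, unloaded };

bool transition(State& state, State target, std::mutex& patching_lock) {
  std::lock_guard<std::mutex> guard(patching_lock);
  if (state == target)
    return false;           // another thread already performed this transition
  state = target;           // change state, then log and unlink while still locked
  return true;
}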
View File

@ -78,29 +78,8 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
// nmethods (native methods) are the compiled code versions of Java methods.
struct nmFlags {
friend class VMStructs;
unsigned int version:8; // version number (0 = first version)
unsigned int age:4; // age (in # of sweep steps)
unsigned int state:2; // {alive, zombie, unloaded}
unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
unsigned int markedForReclamation:1; // Used by NMethodSweeper
unsigned int has_unsafe_access:1; // May fault due to unsafe access.
unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int speculatively_disconnected:1; // Marked for potential unload
void clear();
};
// A nmethod contains:
//
// An nmethod contains:
// - header (the nmethod structure)
// [Relocation]
// - relocation information
@ -131,8 +110,6 @@ class nmethod : public CodeBlob {
friend class CodeCache; // non-perm oops
private:
// Shared fields for all nmethod's
static int _zombie_instruction_size;
methodOop _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
@ -147,6 +124,11 @@ class nmethod : public CodeBlob {
AbstractCompiler* _compiler; // The compiler which compiled this nmethod
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
// Offsets for different nmethod parts
int _exception_offset;
// All deoptee's will resume execution at this location described by
@ -175,23 +157,31 @@ class nmethod : public CodeBlob {
// pc during a deopt.
int _orig_pc_offset;
int _compile_id; // which compilation made this nmethod
int _comp_level; // compilation level
int _compile_id; // which compilation made this nmethod
int _comp_level; // compilation level
// offsets for entry points
address _entry_point; // entry point with class check
address _verified_entry_point; // entry point without class check
address _osr_entry_point; // entry point for on stack replacement
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
bool _speculatively_disconnected; // Marked for potential unload
bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper)
bool _marked_for_deoptimization; // Used for stack deoptimization
// used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported;
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
// Protected by Patching_lock
unsigned char _state; // {alive, not_entrant, zombie, unloaded}
nmFlags flags; // various flags to keep track of nmethod state
bool _markedForDeoptimization; // Used for stack deoptimization
enum { alive = 0,
not_entrant = 1, // uncommon trap has happened but activations may still exist
zombie = 2,
unloaded = 3 };
// used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported;
jbyte _scavenge_root_state;
@ -270,15 +260,15 @@ class nmethod : public CodeBlob {
bool make_not_entrant_or_zombie(unsigned int state);
void inc_decompile_count();
// used to check that writes to nmFlags are done consistently.
static void check_safepoint() PRODUCT_RETURN;
// Used to manipulate the exception cache
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// Inform external interfaces that a compiled method has been unloaded
inline void post_compiled_method_unload();
void post_compiled_method_unload();
// Initialize fields to their default values
void init_defaults();
public:
// create nmethod with entry_bci
@ -393,11 +383,11 @@ class nmethod : public CodeBlob {
address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
// flag accessing and manipulation
bool is_in_use() const { return flags.state == alive; }
bool is_alive() const { return flags.state == alive || flags.state == not_entrant; }
bool is_not_entrant() const { return flags.state == not_entrant; }
bool is_zombie() const { return flags.state == zombie; }
bool is_unloaded() const { return flags.state == unloaded; }
bool is_in_use() const { return _state == alive; }
bool is_alive() const { return _state == alive || _state == not_entrant; }
bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; }
// Make the nmethod non entrant. The nmethod will continue to be
// alive. It is used when an uncommon trap happens. Returns true
@ -410,37 +400,33 @@ class nmethod : public CodeBlob {
bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; }
bool is_marked_for_deoptimization() const { return _markedForDeoptimization; }
void mark_for_deoptimization() { _markedForDeoptimization = true; }
bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; }
void mark_for_deoptimization() { _marked_for_deoptimization = true; }
void make_unloaded(BoolObjectClosure* is_alive, oop cause);
bool has_dependencies() { return dependencies_size() != 0; }
void flush_dependencies(BoolObjectClosure* is_alive);
bool has_flushed_dependencies() { return flags.hasFlushedDependencies; }
void set_has_flushed_dependencies() {
bool has_flushed_dependencies() { return _has_flushed_dependencies; }
void set_has_flushed_dependencies() {
assert(!has_flushed_dependencies(), "should only happen once");
flags.hasFlushedDependencies = 1;
_has_flushed_dependencies = 1;
}
bool is_marked_for_reclamation() const { return flags.markedForReclamation; }
void mark_for_reclamation() { flags.markedForReclamation = 1; }
void unmark_for_reclamation() { flags.markedForReclamation = 0; }
bool is_marked_for_reclamation() const { return _marked_for_reclamation; }
void mark_for_reclamation() { _marked_for_reclamation = 1; }
bool has_unsafe_access() const { return flags.has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }
bool is_speculatively_disconnected() const { return _speculatively_disconnected; }
void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; }
int comp_level() const { return _comp_level; }
int version() const { return flags.version; }
void set_version(int v);
// Support for oops in scopes and relocs:
// Note: index 0 is reserved for null.
oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); }

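The header side of the same cleanup: the packed nmFlags bitfield gives way to discrete fields, each annotated with the lock that guards it. One general hazard of the packed form, offered as background rather than this commit's stated rationale: writing any one bitfield member is a read-modify-write of the shared word, so unsynchronized writers to different members can lose each other's updates.

struct Packed {            // one word shared by all flags:
  unsigned a : 1;          // writing a rewrites the word that also holds b
  unsigned b : 1;
};

struct Split {             // independent fields:
  bool a;                  // writing a cannot clobber b
  bool b;
};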
View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,6 @@ void* VtableStub::operator new(size_t size, int code_size) {
}
_chunk = blob->instructions_begin();
_chunk_end = _chunk + bytes;
VTune::register_stub("vtable stub", _chunk, _chunk_end);
Forte::register_stub("vtable stub", _chunk, _chunk_end);
// Notify JVMTI about this stub. The event will be recorded by the enclosing
// JvmtiDynamicCodeEventCollector and posted when this thread has released

View File

@ -270,7 +270,7 @@ psParallelCompact.cpp parallelScavengeHeap.inline.hpp
psParallelCompact.cpp pcTasks.hpp
psParallelCompact.cpp psMarkSweep.hpp
psParallelCompact.cpp psMarkSweepDecorator.hpp
psParallelCompact.cpp psCompactionManager.hpp
psParallelCompact.cpp psCompactionManager.inline.hpp
psParallelCompact.cpp psPromotionManager.inline.hpp
psParallelCompact.cpp psOldGen.hpp
psParallelCompact.cpp psParallelCompact.hpp

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -32,7 +32,7 @@ ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray* ParCompactionManager::_start_array = NULL;
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
ParCompactionManager::ParCompactionManager() :
_action(CopyAndUpdate) {
@ -43,25 +43,9 @@ ParCompactionManager::ParCompactionManager() :
_old_gen = heap->old_gen();
_start_array = old_gen()->start_array();
marking_stack()->initialize();
// We want the overflow stack to be permanent
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
_objarray_queue.initialize();
_objarray_overflow_stack =
new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true);
#ifdef USE_RegionTaskQueueWithOverflow
_objarray_stack.initialize();
region_stack()->initialize();
#else
region_stack()->initialize();
// We want the overflow stack to be permanent
_region_overflow_stack =
new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true);
#endif
// Note that _revisit_klass_stack is allocated out of the
// C heap (as opposed to out of ResourceArena).
@ -71,12 +55,9 @@ ParCompactionManager::ParCompactionManager() :
// From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
// have to do for now until we are able to investigate a more optimal setting.
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
}
ParCompactionManager::~ParCompactionManager() {
delete _overflow_stack;
delete _objarray_overflow_stack;
delete _revisit_klass_stack;
delete _revisit_mdo_stack;
// _manager_array and _stack_array are statics
@ -108,12 +89,8 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
_manager_array[i] = new ParCompactionManager();
guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
stack_array()->register_queue(i, _manager_array[i]->marking_stack());
_objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue);
#ifdef USE_RegionTaskQueueWithOverflow
region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue());
#else
_objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
region_array()->register_queue(i, _manager_array[i]->region_stack());
#endif
}
// The VMThread gets its own ParCompactionManager, which is not available
@ -149,57 +126,6 @@ bool ParCompactionManager::should_reset_only() {
return action() == ParCompactionManager::ResetObjects;
}
// For now save on a stack
void ParCompactionManager::save_for_scanning(oop m) {
stack_push(m);
}
void ParCompactionManager::stack_push(oop obj) {
if(!marking_stack()->push(obj)) {
overflow_stack()->push(obj);
}
}
oop ParCompactionManager::retrieve_for_scanning() {
// Should not be used in the parallel case
ShouldNotReachHere();
return NULL;
}
// Save region on a stack
void ParCompactionManager::save_for_processing(size_t region_index) {
#ifdef ASSERT
const ParallelCompactData& sd = PSParallelCompact::summary_data();
ParallelCompactData::RegionData* const region_ptr = sd.region(region_index);
assert(region_ptr->claimed(), "must be claimed");
assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
region_stack_push(region_index);
}
void ParCompactionManager::region_stack_push(size_t region_index) {
#ifdef USE_RegionTaskQueueWithOverflow
region_stack()->save(region_index);
#else
if(!region_stack()->push(region_index)) {
region_overflow_stack()->push(region_index);
}
#endif
}
bool ParCompactionManager::retrieve_for_processing(size_t& region_index) {
#ifdef USE_RegionTaskQueueWithOverflow
return region_stack()->retrieve(region_index);
#else
// Should not be used in the parallel case
ShouldNotReachHere();
return false;
#endif
}
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
@ -218,8 +144,8 @@ void ParCompactionManager::follow_marking_stacks() {
do {
// Drain the overflow stack first, to allow stealing from the marking stack.
oop obj;
while (!overflow_stack()->is_empty()) {
overflow_stack()->pop()->follow_contents(this);
while (marking_stack()->pop_overflow(obj)) {
obj->follow_contents(this);
}
while (marking_stack()->pop_local(obj)) {
obj->follow_contents(this);
@ -227,11 +153,10 @@ void ParCompactionManager::follow_marking_stacks() {
// Process ObjArrays one at a time to avoid marking stack bloat.
ObjArrayTask task;
if (!_objarray_overflow_stack->is_empty()) {
task = _objarray_overflow_stack->pop();
if (_objarray_stack.pop_overflow(task)) {
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
k->oop_follow_contents(this, task.obj(), task.index());
} else if (_objarray_queue.pop_local(task)) {
} else if (_objarray_stack.pop_local(task)) {
objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
k->oop_follow_contents(this, task.obj(), task.index());
}
@ -240,68 +165,18 @@ void ParCompactionManager::follow_marking_stacks() {
assert(marking_stacks_empty(), "Sanity");
}
void ParCompactionManager::drain_region_overflow_stack() {
size_t region_index = (size_t) -1;
while(region_stack()->retrieve_from_overflow(region_index)) {
PSParallelCompact::fill_and_update_region(this, region_index);
}
}
void ParCompactionManager::drain_region_stacks() {
#ifdef ASSERT
ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
MutableSpace* to_space = heap->young_gen()->to_space();
MutableSpace* old_space = heap->old_gen()->object_space();
MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */
#if 1 // def DO_PARALLEL - the serial code hasn't been updated
do {
#ifdef USE_RegionTaskQueueWithOverflow
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
size_t region_index = (size_t) -1;
while(region_stack()->retrieve_from_overflow(region_index)) {
// Drain overflow stack first so other threads can steal.
size_t region_index;
while (region_stack()->pop_overflow(region_index)) {
PSParallelCompact::fill_and_update_region(this, region_index);
}
while (region_stack()->retrieve_from_stealable_queue(region_index)) {
while (region_stack()->pop_local(region_index)) {
PSParallelCompact::fill_and_update_region(this, region_index);
}
} while (!region_stack()->is_empty());
#else
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!region_overflow_stack()->is_empty()) {
size_t region_index = region_overflow_stack()->pop();
PSParallelCompact::fill_and_update_region(this, region_index);
}
size_t region_index = -1;
// obj is a reference!!!
while (region_stack()->pop_local(region_index)) {
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
PSParallelCompact::fill_and_update_region(this, region_index);
}
} while((region_stack()->size() != 0) ||
(region_overflow_stack()->length() != 0));
#endif
#ifdef USE_RegionTaskQueueWithOverflow
assert(region_stack()->is_empty(), "Sanity");
#else
assert(region_stack()->size() == 0, "Sanity");
assert(region_overflow_stack()->length() == 0, "Sanity");
#endif
#else
oop obj;
while (obj = retrieve_for_scanning()) {
obj->follow_contents(this);
}
#endif
}
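Across these hunks, the hand-rolled pattern of a fixed-size task queue plus a separate GrowableArray overflow stack gives way to a single overflow-backed queue whose push cannot fail and whose pop_overflow/pop_local pair drives the drain loops. A minimal single-threaded sketch of that shape, using std::deque for the overflow storage; the real queue also supports lock-free stealing by other GC threads, which this deliberately ignores:

#include <deque>

template <class E, unsigned N = 8192>
class OverflowTaskQueueSketch {
  E _elems[N];                 // bounded local queue (the stealable part in HotSpot)
  unsigned _size = 0;
  std::deque<E> _overflow;     // unbounded fallback; takes what the local queue rejects
public:
  void push(E t) {             // never fails
    if (_size < N) _elems[_size++] = t;
    else           _overflow.push_back(t);
  }
  bool pop_overflow(E& t) {    // drained first, so other threads can steal local work
    if (_overflow.empty()) return false;
    t = _overflow.back(); _overflow.pop_back(); return true;
  }
  bool pop_local(E& t) {
    if (_size == 0) return false;
    t = _elems[--_size]; return true;
  }
  bool is_empty() const { return _size == 0 && _overflow.empty(); }
};

The drain loops above then reduce to: pop_overflow until it is empty, then pop_local, and repeat while either part is non-empty.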
#ifdef ASSERT

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -59,10 +59,10 @@ class ParCompactionManager : public CHeapObj {
private:
// 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
#define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue;
typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
#undef OBJARRAY_QUEUE_SIZE
#define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue;
typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet;
#undef QUEUE_SIZE
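Checking the size comment above, assuming an ObjArrayTask is an (oop, index) pair occupying 8 bytes on 32-bit and 16 bytes on 64-bit:

//   32-bit: QUEUE_SIZE = 1 << 12 = 4096 entries * 8 bytes  =  32 KiB
//   64-bit: QUEUE_SIZE = 1 << 13 = 8192 entries * 16 bytes = 128 KiB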
static ParCompactionManager** _manager_array;
static OopTaskQueueSet* _stack_array;
@ -72,23 +72,13 @@ class ParCompactionManager : public CHeapObj {
static PSOldGen* _old_gen;
private:
OopTaskQueue _marking_stack;
GrowableArray<oop>* _overflow_stack;
typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack;
ObjArrayTaskQueue _objarray_queue;
ObjArrayOverflowStack* _objarray_overflow_stack;
OverflowTaskQueue<oop> _marking_stack;
ObjArrayTaskQueue _objarray_stack;
// Is there a way to reuse the _marking_stack for the
// saving empty regions? For now just create a different
// type of TaskQueue.
#ifdef USE_RegionTaskQueueWithOverflow
RegionTaskQueueWithOverflow _region_stack;
#else
RegionTaskQueue _region_stack;
GrowableArray<size_t>* _region_overflow_stack;
#endif
#if 1 // does this happen enough to need a per thread stack?
GrowableArray<Klass*>* _revisit_klass_stack;
@ -107,16 +97,8 @@ private:
protected:
// Array of tasks. Needed by the ParallelTaskTerminator.
static RegionTaskQueueSet* region_array() { return _region_array; }
OopTaskQueue* marking_stack() { return &_marking_stack; }
GrowableArray<oop>* overflow_stack() { return _overflow_stack; }
#ifdef USE_RegionTaskQueueWithOverflow
RegionTaskQueueWithOverflow* region_stack() { return &_region_stack; }
#else
RegionTaskQueue* region_stack() { return &_region_stack; }
GrowableArray<size_t>* region_overflow_stack() {
return _region_overflow_stack;
}
#endif
OverflowTaskQueue<oop>* marking_stack() { return &_marking_stack; }
RegionTaskQueue* region_stack() { return &_region_stack; }
// Pushes onto the marking stack. If the marking stack is full,
// pushes onto the overflow stack.
@ -124,11 +106,7 @@ private:
// Do not implement an equivalent stack_pop. Deal with the
// marking stack and overflow stack directly.
// Pushes onto the region stack. If the region stack is full,
// pushes onto the region overflow stack.
void region_stack_push(size_t region_index);
public:
Action action() { return _action; }
void set_action(Action v) { _action = v; }
@ -157,22 +135,15 @@ public:
GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
#endif
// Save oop for later processing. Must not fail.
void save_for_scanning(oop m);
// Get an oop for scanning. If it returns null, no oop was found.
oop retrieve_for_scanning();
inline void push_objarray(oop obj, size_t index);
// Save region for later processing. Must not fail.
void save_for_processing(size_t region_index);
// Get a region for processing. If it returns false, no region was found.
bool retrieve_for_processing(size_t& region_index);
// Save for later processing. Must not fail.
inline void push(oop obj) { _marking_stack.push(obj); }
inline void push_objarray(oop objarray, size_t index);
inline void push_region(size_t index);
// Access function for compaction managers
static ParCompactionManager* gc_thread_compaction_manager(int index);
static bool steal(int queue_num, int* seed, Task& t) {
static bool steal(int queue_num, int* seed, oop& t) {
return stack_array()->steal(queue_num, seed, t);
}
@ -180,8 +151,8 @@ public:
return _objarray_queues->steal(queue_num, seed, t);
}
static bool steal(int queue_num, int* seed, RegionTask& t) {
return region_array()->steal(queue_num, seed, t);
static bool steal(int queue_num, int* seed, size_t& region) {
return region_array()->steal(queue_num, seed, region);
}
// Process tasks remaining on any marking stack
@ -191,9 +162,6 @@ public:
// Process tasks remaining on any stack
void drain_region_stacks();
// Process tasks remaining on any stack
void drain_region_overflow_stack();
// Debugging support
#ifdef ASSERT
bool stacks_have_been_allocated();
@ -208,6 +176,5 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
}
bool ParCompactionManager::marking_stacks_empty() const {
return _marking_stack.size() == 0 && _overflow_stack->is_empty() &&
_objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty();
return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

View File

@ -26,7 +26,16 @@ void ParCompactionManager::push_objarray(oop obj, size_t index)
{
ObjArrayTask task(obj, index);
assert(task.is_valid(), "bad ObjArrayTask");
if (!_objarray_queue.push(task)) {
_objarray_overflow_stack->push(task);
}
_objarray_stack.push(task);
}
void ParCompactionManager::push_region(size_t index)
{
#ifdef ASSERT
const ParallelCompactData& sd = PSParallelCompact::summary_data();
ParallelCompactData::RegionData* const region_ptr = sd.region(index);
assert(region_ptr->claimed(), "must be claimed");
assert(region_ptr->_pushed++ == 0, "should only be pushed once");
#endif
region_stack()->push(index);
}

View File

@ -2474,7 +2474,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
for (size_t cur = end_region - 1; cur >= beg_region; --cur) {
if (sd.region(cur)->claim_unsafe()) {
ParCompactionManager* cm = ParCompactionManager::manager_array(which);
cm->save_for_processing(cur);
cm->push_region(cur);
if (TraceParallelOldGCCompactionPhase && Verbose) {
const size_t count_mod_8 = fillable_regions & 7;
@ -3138,7 +3138,7 @@ void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
assert(cur->data_size() > 0, "region must have live data");
cur->decrement_destination_count();
if (cur < enqueue_end && cur->available() && cur->claim()) {
cm->save_for_processing(sd.region(cur));
cm->push_region(sd.region(cur));
}
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1297,11 +1297,8 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (mark_bitmap()->is_unmarked(obj)) {
if (mark_obj(obj)) {
// This thread marked the object and owns the subsequent processing of it.
cm->save_for_scanning(obj);
}
if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
cm->push(obj);
}
}
}
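The is_unmarked/mark_obj pair above is a claim idiom: assuming mark_obj() sets the mark bit atomically (a test-and-set on the parallel mark bitmap), it returns true for exactly one thread, so exactly one thread pushes the object for processing. The shape of the idiom, sketched with std::atomic standing in for the bitmap bit:

#include <atomic>

// Sketch only: the single thread that flips the flag owns the follow-up work.
bool try_claim(std::atomic<bool>& marked) {
  bool expected = false;
  return marked.compare_exchange_strong(expected, true);
}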

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -94,45 +94,13 @@ void PSPromotionManager::post_scavenge() {
print_stats();
#endif // PS_PM_STATS
for(uint i=0; i<ParallelGCThreads+1; i++) {
for (uint i = 0; i < ParallelGCThreads + 1; i++) {
PSPromotionManager* manager = manager_array(i);
// the guarantees are a bit gratuitous but, if one fires, we'll
// have a better idea of what went wrong
if (i < ParallelGCThreads) {
guarantee((!UseDepthFirstScavengeOrder ||
manager->overflow_stack_depth()->length() <= 0),
"promotion manager overflow stack must be empty");
guarantee((UseDepthFirstScavengeOrder ||
manager->overflow_stack_breadth()->length() <= 0),
"promotion manager overflow stack must be empty");
guarantee((!UseDepthFirstScavengeOrder ||
manager->claimed_stack_depth()->size() <= 0),
"promotion manager claimed stack must be empty");
guarantee((UseDepthFirstScavengeOrder ||
manager->claimed_stack_breadth()->size() <= 0),
"promotion manager claimed stack must be empty");
if (UseDepthFirstScavengeOrder) {
assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
} else {
guarantee((!UseDepthFirstScavengeOrder ||
manager->overflow_stack_depth()->length() <= 0),
"VM Thread promotion manager overflow stack "
"must be empty");
guarantee((UseDepthFirstScavengeOrder ||
manager->overflow_stack_breadth()->length() <= 0),
"VM Thread promotion manager overflow stack "
"must be empty");
guarantee((!UseDepthFirstScavengeOrder ||
manager->claimed_stack_depth()->size() <= 0),
"VM Thread promotion manager claimed stack "
"must be empty");
guarantee((UseDepthFirstScavengeOrder ||
manager->claimed_stack_breadth()->size() <= 0),
"VM Thread promotion manager claimed stack "
"must be empty");
assert(manager->claimed_stack_breadth()->is_empty(), "should be empty");
}
manager->flush_labs();
}
}
@ -181,15 +149,9 @@ PSPromotionManager::PSPromotionManager() {
if (depth_first()) {
claimed_stack_depth()->initialize();
queue_size = claimed_stack_depth()->max_elems();
// We want the overflow stack to be permanent
_overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true);
_overflow_stack_breadth = NULL;
} else {
claimed_stack_breadth()->initialize();
queue_size = claimed_stack_breadth()->max_elems();
// We want the overflow stack to be permanent
_overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true);
_overflow_stack_depth = NULL;
}
_totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
@ -209,8 +171,7 @@ PSPromotionManager::PSPromotionManager() {
}
void PSPromotionManager::reset() {
assert(claimed_stack_empty(), "reset of non-empty claimed stack");
assert(overflow_stack_empty(), "reset of non-empty overflow stack");
assert(stacks_empty(), "reset of non-empty stack");
// We need to get an assert in here to make sure the labs are always flushed.
@ -243,7 +204,7 @@ void PSPromotionManager::reset() {
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
assert(depth_first(), "invariant");
assert(overflow_stack_depth() != NULL, "invariant");
assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant");
totally_drain = totally_drain || _totally_drain;
#ifdef ASSERT
@ -254,41 +215,35 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */
OopStarTaskQueue* const tq = claimed_stack_depth();
do {
StarTask p;
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!overflow_stack_depth()->is_empty()) {
// linux compiler wants different overloaded operator= in taskqueue to
// assign to p that the other compilers don't like.
StarTask ptr = overflow_stack_depth()->pop();
process_popped_location_depth(ptr);
while (tq->pop_overflow(p)) {
process_popped_location_depth(p);
}
if (totally_drain) {
while (claimed_stack_depth()->pop_local(p)) {
while (tq->pop_local(p)) {
process_popped_location_depth(p);
}
} else {
while (claimed_stack_depth()->size() > _target_stack_size &&
claimed_stack_depth()->pop_local(p)) {
while (tq->size() > _target_stack_size && tq->pop_local(p)) {
process_popped_location_depth(p);
}
}
} while( (totally_drain && claimed_stack_depth()->size() > 0) ||
(overflow_stack_depth()->length() > 0) );
} while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());
assert(!totally_drain || claimed_stack_empty(), "Sanity");
assert(totally_drain ||
claimed_stack_depth()->size() <= _target_stack_size,
"Sanity");
assert(overflow_stack_empty(), "Sanity");
assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
assert(tq->overflow_empty(), "Sanity");
}
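One subtlety in the rewritten termination test: && binds tighter than ||, so the do/while exits only once the overflow stack is empty and, when totally_drain is set, the local queue is empty as well. Fully parenthesized, the condition at the bottom of both drain loops is equivalent to:

(totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty()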
void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
assert(!depth_first(), "invariant");
assert(overflow_stack_breadth() != NULL, "invariant");
assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant");
totally_drain = totally_drain || _totally_drain;
#ifdef ASSERT
@ -299,51 +254,39 @@ void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
MutableSpace* perm_space = heap->perm_gen()->object_space();
#endif /* ASSERT */
OverflowTaskQueue<oop>* const tq = claimed_stack_breadth();
do {
oop obj;
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while(!overflow_stack_breadth()->is_empty()) {
obj = overflow_stack_breadth()->pop();
while (tq->pop_overflow(obj)) {
obj->copy_contents(this);
}
if (totally_drain) {
// obj is a reference!!!
while (claimed_stack_breadth()->pop_local(obj)) {
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
while (tq->pop_local(obj)) {
obj->copy_contents(this);
}
} else {
// obj is a reference!!!
while (claimed_stack_breadth()->size() > _target_stack_size &&
claimed_stack_breadth()->pop_local(obj)) {
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
while (tq->size() > _target_stack_size && tq->pop_local(obj)) {
obj->copy_contents(this);
}
}
// If we could not find any other work, flush the prefetch queue
if (claimed_stack_breadth()->size() == 0 &&
(overflow_stack_breadth()->length() == 0)) {
if (tq->is_empty()) {
flush_prefetch_queue();
}
} while((totally_drain && claimed_stack_breadth()->size() > 0) ||
(overflow_stack_breadth()->length() > 0));
} while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());
assert(!totally_drain || claimed_stack_empty(), "Sanity");
assert(totally_drain ||
claimed_stack_breadth()->size() <= _target_stack_size,
"Sanity");
assert(overflow_stack_empty(), "Sanity");
assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
assert(tq->overflow_empty(), "Sanity");
}
void PSPromotionManager::flush_labs() {
assert(claimed_stack_empty(), "Attempt to flush lab with live stack");
assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack");
assert(stacks_empty(), "Attempt to flush lab with live stack");
// If either promotion lab fills up, we can flush the
// lab but not refill it, so check first.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -78,9 +78,7 @@ class PSPromotionManager : public CHeapObj {
PrefetchQueue _prefetch_queue;
OopStarTaskQueue _claimed_stack_depth;
GrowableArray<StarTask>* _overflow_stack_depth;
OopTaskQueue _claimed_stack_breadth;
GrowableArray<oop>* _overflow_stack_breadth;
OverflowTaskQueue<oop> _claimed_stack_breadth;
bool _depth_first;
bool _totally_drain;
@ -97,9 +95,6 @@ class PSPromotionManager : public CHeapObj {
template <class T> inline void claim_or_forward_internal_depth(T* p);
template <class T> inline void claim_or_forward_internal_breadth(T* p);
GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
// On the task queues we push reference locations as well as
// partially-scanned arrays (in the latter case, we push an oop to
// the from-space image of the array and the length on the
@ -151,18 +146,19 @@ class PSPromotionManager : public CHeapObj {
#if PS_PM_STATS
++_total_pushes;
int stack_length = claimed_stack_depth()->overflow_stack()->length();
#endif // PS_PM_STATS
if (!claimed_stack_depth()->push(p)) {
overflow_stack_depth()->push(p);
claimed_stack_depth()->push(p);
#if PS_PM_STATS
if (claimed_stack_depth()->overflow_stack()->length() != stack_length) {
++_overflow_pushes;
uint stack_length = (uint) overflow_stack_depth()->length();
if (stack_length > _max_overflow_length) {
_max_overflow_length = stack_length;
if ((uint)stack_length + 1 > _max_overflow_length) {
_max_overflow_length = (uint)stack_length + 1;
}
#endif // PS_PM_STATS
}
#endif // PS_PM_STATS
}
void push_breadth(oop o) {
@ -170,18 +166,19 @@ class PSPromotionManager : public CHeapObj {
#if PS_PM_STATS
++_total_pushes;
int stack_length = claimed_stack_breadth()->overflow_stack()->length();
#endif // PS_PM_STATS
if(!claimed_stack_breadth()->push(o)) {
overflow_stack_breadth()->push(o);
claimed_stack_breadth()->push(o);
#if PS_PM_STATS
if (claimed_stack_breadth()->overflow_stack()->length() != stack_length) {
++_overflow_pushes;
uint stack_length = (uint) overflow_stack_breadth()->length();
if (stack_length > _max_overflow_length) {
_max_overflow_length = stack_length;
if ((uint)stack_length + 1 > _max_overflow_length) {
_max_overflow_length = (uint)stack_length + 1;
}
#endif // PS_PM_STATS
}
#endif // PS_PM_STATS
}
protected:
@ -199,12 +196,10 @@ class PSPromotionManager : public CHeapObj {
static PSPromotionManager* vm_thread_promotion_manager();
static bool steal_depth(int queue_num, int* seed, StarTask& t) {
assert(stack_array_depth() != NULL, "invariant");
return stack_array_depth()->steal(queue_num, seed, t);
}
static bool steal_breadth(int queue_num, int* seed, Task& t) {
assert(stack_array_breadth() != NULL, "invariant");
static bool steal_breadth(int queue_num, int* seed, oop& t) {
return stack_array_breadth()->steal(queue_num, seed, t);
}
@ -214,7 +209,7 @@ class PSPromotionManager : public CHeapObj {
OopStarTaskQueue* claimed_stack_depth() {
return &_claimed_stack_depth;
}
OopTaskQueue* claimed_stack_breadth() {
OverflowTaskQueue<oop>* claimed_stack_breadth() {
return &_claimed_stack_breadth;
}
@ -246,25 +241,13 @@ class PSPromotionManager : public CHeapObj {
void drain_stacks_depth(bool totally_drain);
void drain_stacks_breadth(bool totally_drain);
bool claimed_stack_empty() {
if (depth_first()) {
return claimed_stack_depth()->size() <= 0;
} else {
return claimed_stack_breadth()->size() <= 0;
}
}
bool overflow_stack_empty() {
if (depth_first()) {
return overflow_stack_depth()->length() <= 0;
} else {
return overflow_stack_breadth()->length() <= 0;
}
bool depth_first() const {
return _depth_first;
}
bool stacks_empty() {
return claimed_stack_empty() && overflow_stack_empty();
}
bool depth_first() {
return _depth_first;
return depth_first() ?
claimed_stack_depth()->is_empty() :
claimed_stack_breadth()->is_empty();
}
inline void process_popped_location_depth(StarTask p);

View File

@ -414,7 +414,6 @@ bool PSScavenge::invoke_no_policy() {
}
// Finally, flush the promotion_manager's labs, and deallocate its stacks.
assert(promotion_manager->claimed_stack_empty(), "Sanity");
PSPromotionManager::post_scavenge();
promotion_failure_occurred = promotion_failed();

View File

@ -89,6 +89,21 @@ adlcVMDeps.hpp allocation.hpp
allocation.hpp c2_globals.hpp
bcEscapeAnalyzer.cpp bcEscapeAnalyzer.hpp
bcEscapeAnalyzer.cpp bitMap.inline.hpp
bcEscapeAnalyzer.cpp bytecode.hpp
bcEscapeAnalyzer.cpp ciConstant.hpp
bcEscapeAnalyzer.cpp ciField.hpp
bcEscapeAnalyzer.cpp ciMethodBlocks.hpp
bcEscapeAnalyzer.cpp ciStreams.hpp
bcEscapeAnalyzer.hpp allocation.hpp
bcEscapeAnalyzer.hpp ciMethod.hpp
bcEscapeAnalyzer.hpp ciMethodData.hpp
bcEscapeAnalyzer.hpp dependencies.hpp
bcEscapeAnalyzer.hpp growableArray.hpp
bcEscapeAnalyzer.hpp vectset.hpp
block.cpp allocation.inline.hpp
block.cpp block.hpp
block.cpp cfgnode.hpp
@ -239,6 +254,7 @@ chaitin_<os_family>.cpp machnode.hpp
ciEnv.cpp compileLog.hpp
ciEnv.cpp runtime.hpp
ciMethod.cpp bcEscapeAnalyzer.hpp
ciMethod.cpp ciTypeFlow.hpp
ciMethod.cpp methodOop.hpp

View File

@ -301,20 +301,6 @@ barrierSet.hpp oopsHierarchy.hpp
barrierSet.inline.hpp barrierSet.hpp
barrierSet.inline.hpp cardTableModRefBS.hpp
bcEscapeAnalyzer.cpp bcEscapeAnalyzer.hpp
bcEscapeAnalyzer.cpp bitMap.inline.hpp
bcEscapeAnalyzer.cpp bytecode.hpp
bcEscapeAnalyzer.cpp ciConstant.hpp
bcEscapeAnalyzer.cpp ciField.hpp
bcEscapeAnalyzer.cpp ciMethodBlocks.hpp
bcEscapeAnalyzer.cpp ciStreams.hpp
bcEscapeAnalyzer.hpp allocation.hpp
bcEscapeAnalyzer.hpp ciMethod.hpp
bcEscapeAnalyzer.hpp ciMethodData.hpp
bcEscapeAnalyzer.hpp dependencies.hpp
bcEscapeAnalyzer.hpp growableArray.hpp
biasedLocking.cpp biasedLocking.hpp
biasedLocking.cpp klass.inline.hpp
biasedLocking.cpp markOop.hpp
@ -665,7 +651,6 @@ ciKlassKlass.hpp ciSymbol.hpp
ciMethod.cpp abstractCompiler.hpp
ciMethod.cpp allocation.inline.hpp
ciMethod.cpp bcEscapeAnalyzer.hpp
ciMethod.cpp bitMap.inline.hpp
ciMethod.cpp ciCallProfile.hpp
ciMethod.cpp ciExceptionHandler.hpp
@ -964,7 +949,6 @@ classLoader.cpp threadService.hpp
classLoader.cpp timer.hpp
classLoader.cpp universe.inline.hpp
classLoader.cpp vmSymbols.hpp
classLoader.cpp vtune.hpp
classLoader.hpp classFileParser.hpp
classLoader.hpp perfData.hpp
@ -1004,7 +988,6 @@ codeBlob.cpp relocInfo.hpp
codeBlob.cpp safepoint.hpp
codeBlob.cpp sharedRuntime.hpp
codeBlob.cpp vframe.hpp
codeBlob.cpp vtune.hpp
codeBlob.hpp codeBuffer.hpp
codeBlob.hpp frame.hpp
@ -2167,7 +2150,6 @@ interpreter.cpp sharedRuntime.hpp
interpreter.cpp stubRoutines.hpp
interpreter.cpp templateTable.hpp
interpreter.cpp timer.hpp
interpreter.cpp vtune.hpp
interpreter.hpp cppInterpreter.hpp
interpreter.hpp stubs.hpp
@ -2323,7 +2305,6 @@ java.cpp universe.hpp
java.cpp vmError.hpp
java.cpp vm_operations.hpp
java.cpp vm_version_<arch>.hpp
java.cpp vtune.hpp
java.hpp os.hpp
@ -3050,7 +3031,6 @@ nmethod.cpp nmethod.hpp
nmethod.cpp scopeDesc.hpp
nmethod.cpp sharedRuntime.hpp
nmethod.cpp sweeper.hpp
nmethod.cpp vtune.hpp
nmethod.cpp xmlstream.hpp
nmethod.hpp codeBlob.hpp
@ -3773,7 +3753,6 @@ sharedRuntime.cpp vframeArray.hpp
sharedRuntime.cpp vmSymbols.hpp
sharedRuntime.cpp vmreg_<arch>.inline.hpp
sharedRuntime.cpp vtableStubs.hpp
sharedRuntime.cpp vtune.hpp
sharedRuntime.cpp xmlstream.hpp
sharedRuntime.hpp allocation.hpp
@ -3937,7 +3916,6 @@ stubCodeGenerator.cpp disassembler.hpp
stubCodeGenerator.cpp forte.hpp
stubCodeGenerator.cpp oop.inline.hpp
stubCodeGenerator.cpp stubCodeGenerator.hpp
stubCodeGenerator.cpp vtune.hpp
stubCodeGenerator.hpp allocation.hpp
stubCodeGenerator.hpp assembler.hpp
@ -4458,7 +4436,6 @@ universe.cpp universe.hpp
universe.cpp universe.inline.hpp
universe.cpp vmSymbols.hpp
universe.cpp vm_operations.hpp
universe.cpp vtune.hpp
universe.hpp growableArray.hpp
universe.hpp handles.hpp
@ -4721,7 +4698,6 @@ vtableStubs.cpp mutexLocker.hpp
vtableStubs.cpp resourceArea.hpp
vtableStubs.cpp sharedRuntime.hpp
vtableStubs.cpp vtableStubs.hpp
vtableStubs.cpp vtune.hpp
vtableStubs.hpp allocation.hpp
@ -4735,11 +4711,6 @@ vtableStubs_<arch_model>.cpp sharedRuntime.hpp
vtableStubs_<arch_model>.cpp vmreg_<arch>.inline.hpp
vtableStubs_<arch_model>.cpp vtableStubs.hpp
vtune.hpp allocation.hpp
vtune_<os_family>.cpp interpreter.hpp
vtune_<os_family>.cpp vtune.hpp
watermark.hpp allocation.hpp
watermark.hpp globalDefinitions.hpp

View File

@ -99,11 +99,6 @@ void interpreter_init() {
#endif // PRODUCT
// need to hit every safepoint in order to call zapping routine
// register the interpreter
VTune::register_stub(
"Interpreter",
AbstractInterpreter::code()->code_start(),
AbstractInterpreter::code()->code_end()
);
Forte::register_stub(
"Interpreter",
AbstractInterpreter::code()->code_start(),

View File

@ -343,7 +343,8 @@ bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* kl
// being initialized. Uncommon-trap for not-initialized static or
// v-calls. Let interface calls happen.
ciInstanceKlass* holder_klass = dest_method->holder();
if (!holder_klass->is_initialized() &&
if (!holder_klass->is_being_initialized() &&
!holder_klass->is_initialized() &&
!holder_klass->is_interface()) {
uncommon_trap(Deoptimization::Reason_uninitialized,
Deoptimization::Action_reinterpret,

View File

@ -480,6 +480,7 @@ class Parse : public GraphKit {
bool push_constant(ciConstant con, bool require_constant = false);
// implementation of object creation bytecodes
void emit_guard_for_new(ciInstanceKlass* klass);
void do_new();
void do_newarray(BasicType elemtype);
void do_anewarray();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -197,6 +197,43 @@ void Parse::array_store_check() {
}
void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
// Emit guarded new
// if (klass->_init_thread != current_thread ||
// klass->_init_state != being_initialized)
// uncommon_trap
Node* cur_thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
Node* merge = new (C, 3) RegionNode(3);
_gvn.set_type(merge, Type::CONTROL);
Node* kls = makecon(TypeKlassPtr::make(klass));
Node* init_thread_offset = _gvn.MakeConX(instanceKlass::init_thread_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
set_control(IfTrue(iff));
merge->set_req(1, IfFalse(iff));
Node* init_state_offset = _gvn.MakeConX(instanceKlass::init_state_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
adr_node = basic_plus_adr(kls, kls, init_state_offset);
Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT);
Node* being_init = _gvn.intcon(instanceKlass::being_initialized);
tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
set_control(IfTrue(iff));
merge->set_req(2, IfFalse(iff));
PreserveJVMState pjvms(this);
record_for_igvn(merge);
set_control(merge);
uncommon_trap(Deoptimization::Reason_uninitialized,
Deoptimization::Action_reinterpret,
klass);
}
//------------------------------do_new-----------------------------------------
void Parse::do_new() {
kill_dead_locals();
@ -206,7 +243,7 @@ void Parse::do_new() {
assert(will_link, "_new: typeflow responsibility");
// Should initialize, or throw an InstantiationError?
if (!klass->is_initialized() ||
if (!klass->is_initialized() && !klass->is_being_initialized() ||
klass->is_abstract() || klass->is_interface() ||
klass->name() == ciSymbol::java_lang_Class() ||
iter().is_unresolved_klass()) {
@ -215,6 +252,9 @@ void Parse::do_new() {
klass);
return;
}
if (klass->is_being_initialized()) {
emit_guard_for_new(klass);
}
Node* kls = makecon(TypeKlassPtr::make(klass));
Node* obj = new_instance(kls);

View File

@ -118,7 +118,6 @@ void CodeBlobCollector::do_blob(CodeBlob* cb) {
for (int i=0; i<_global_code_blobs->length(); i++) {
JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
if (addr == scb->code_begin()) {
ShouldNotReachHere();
return;
}
}
@ -206,11 +205,11 @@ jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* e
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
nmethod* current = CodeCache::first_nmethod();
while (current != NULL) {
// Lock the nmethod so it can't be freed
nmethodLocker nml(current);
// Only notify for live nmethods
if (current->is_alive()) {
// Lock the nmethod so it can't be freed
nmethodLocker nml(current);
// Don't hold the lock over the notify or jmethodID creation
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
current->get_and_cache_jmethod_id();

View File

@ -2541,9 +2541,6 @@ class CommandLineFlags {
"Enable String cache capabilities on String.java") \
\
/* statistics */ \
develop(bool, UseVTune, false, \
"enable support for Intel's VTune profiler") \
\
develop(bool, CountCompiledCalls, false, \
"counts method invocations") \
\

View File

@ -34,7 +34,6 @@ void perfMemory_init();
// Initialization done by Java thread in init_globals()
void management_init();
void vtune_init();
void bytecodes_init();
void classLoader_init();
void codeCache_init();
@ -82,7 +81,6 @@ void vm_init_globals() {
jint init_globals() {
HandleMark hm;
management_init();
vtune_init();
bytecodes_init();
classLoader_init();
codeCache_init();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -432,8 +432,6 @@ void before_exit(JavaThread * thread) {
print_statistics();
Universe::heap()->print_tracing_info();
VTune::exit();
{ MutexLocker ml(BeforeExit_lock);
_before_exit_status = BEFORE_EXIT_DONE;
BeforeExit_lock->notify_all();

View File

@ -886,6 +886,11 @@ bool os::set_boot_path(char fileSep, char pathSep) {
"%/lib/jsse.jar:"
"%/lib/jce.jar:"
"%/lib/charsets.jar:"
// ## TEMPORARY hack to keep the legacy launcher working when
// ## only the boot module is installed (cf. j.l.ClassLoader)
"%/lib/modules/jdk.boot.jar:"
"%/classes";
char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep);
if (sysclasspath == NULL) return false;
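For orientation, format_boot_path expands the meta-characters in the format string: '%' becomes the JDK home directory, '/' the platform file separator, and ':' the platform path separator. A hypothetical stand-in (not the HotSpot function) showing the substitution:

#include <string>

std::string expand_boot_entry(const std::string& fmt, const std::string& home,
                              char fileSep, char pathSep) {
  std::string out;
  for (char c : fmt) {
    if      (c == '%') out += home;      // "%/classes" -> "<home>/classes"
    else if (c == '/') out += fileSep;
    else if (c == ':') out += pathSep;
    else               out += c;
  }
  return out;
}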

View File

@ -2251,7 +2251,6 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
B->name(),
fingerprint->as_string(),
B->instructions_begin());
VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
if (JvmtiExport::should_post_dynamic_code_generated()) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -132,7 +132,6 @@ StubCodeMark::~StubCodeMark() {
_cdesc->set_end(_cgen->assembler()->pc());
assert(StubCodeDesc::_list == _cdesc, "expected order on list");
_cgen->stub_epilog(_cdesc);
VTune::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
Forte::register_stub(_cdesc->name(), _cdesc->begin(), _cdesc->end());
if (JvmtiExport::should_post_dynamic_code_generated()) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,14 +27,15 @@
long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
int NMethodSweeper::_seen = 0 ; // No. of blobs we have currently processed in current pass of CodeCache
int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache
volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_rescan = false;
bool NMethodSweeper::_do_sweep = false;
jint NMethodSweeper::_sweep_started = 0;
bool NMethodSweeper::_was_full = false;
jint NMethodSweeper::_advise_to_sweep = 0;
jlong NMethodSweeper::_last_was_full = 0;
@ -108,23 +109,14 @@ void NMethodSweeper::scan_stacks() {
// code cache is filling up
_last_was_full = os::javaTimeMillis();
if (PrintMethodFlushing) {
tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
log_sweep("restart_compiler");
}
}
}
}
void NMethodSweeper::possibly_sweep() {
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
if ((!MethodFlushing) || (!_do_sweep)) return;
if (_invocations > 0) {
@ -133,32 +125,31 @@ void NMethodSweeper::possibly_sweep() {
if (old != 0) {
return;
}
sweep_code_cache();
if (_invocations > 0) {
sweep_code_cache();
_invocations--;
}
_sweep_started = 0;
}
_sweep_started = 0;
}
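The _sweep_started handshake above is a compare-and-swap latch: only the thread that wins the 0 -> 1 transition sweeps, everyone else returns at once, and the winner stores 0 when done. A sketch with std::atomic standing in for HotSpot's Atomic::cmpxchg:

#include <atomic>

std::atomic<int> sweep_started{0};

void possibly_sweep_sketch() {
  int expected = 0;
  if (!sweep_started.compare_exchange_strong(expected, 1)) {
    return;                        // another compiler thread is already sweeping
  }
  // ... sweep_code_cache() and decrement the invocation counter ...
  sweep_started.store(0);          // reopen the latch for the next pass
}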
void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
jlong sweep_start;
if(PrintMethodFlushing) {
if (PrintMethodFlushing) {
sweep_start = os::javaTimeMillis();
}
#endif
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
}
// We want to visit all nmethods after NmethodSweepFraction invocations.
// If invocation is 1 we do the rest
int todo = CodeCache::nof_blobs();
if (_invocations > 1) {
todo = (CodeCache::nof_blobs() - _seen) / _invocations;
}
// Compilers may check to sweep more often than stack scans happen,
// don't keep trying once it is all scanned
_invocations--;
// We want to visit all nmethods after NmethodSweepFraction
// invocations so divide the remaining number of nmethods by the
// remaining number of invocations. This is only an estimate since
// the number of nmethods changes during the sweep so the final
// stage must iterate until there are no more nmethods.
int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
assert(!CodeCache_lock->owned_by_self(), "just checking");
@ -166,11 +157,12 @@ void NMethodSweeper::sweep_code_cache() {
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
for(int i = 0; i < todo && _current != NULL; i++) {
// The last invocation iterates until there are no more nmethods
for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
// Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
// Other blobs can be deleted by other threads
// Read next before we potentially delete current
// Since we will give up the CodeCache_lock, always skip ahead
// to the next nmethod. Other blobs can be deleted by other
// threads but nmethods are only reclaimed by the sweeper.
nmethod* next = CodeCache::next_nmethod(_current);
// Now ready to process nmethod and give up CodeCache_lock
@ -183,6 +175,8 @@ void NMethodSweeper::sweep_code_cache() {
}
}
assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
// we've completed a scan without making progress but there were
// nmethods we were unable to process either because they were
@ -201,6 +195,10 @@ void NMethodSweeper::sweep_code_cache() {
tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
}
#endif
if (_invocations == 1) {
log_sweep("finished");
}
}
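A worked example of the quota, assuming a stable cache of 1000 nmethods and NmethodSweepFraction = 4 (so _invocations counts down 4, 3, 2, 1 while _seen accumulates):

//   invocation 4: todo = (1000 -   0) / 4 = 250    _seen -> 250
//   invocation 3: todo = (1000 - 250) / 3 = 250    _seen -> 500
//   invocation 2: todo = (1000 - 500) / 2 = 250    _seen -> 750
//   invocation 1: todo is ignored; the loop runs until _current == NULL,
//                 sweeping the remainder plus anything compiled meanwhile.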
@ -223,7 +221,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
if (nm->is_zombie()) {
// The first time we see an nmethod we mark it. Otherwise, we
// reclaim it. When we have seen a zombie method twice, we know that
// there are no inline caches that refer to it.
if (nm->is_marked_for_reclamation()) {
assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
if (PrintMethodFlushing && Verbose) {
@ -320,16 +318,8 @@ void NMethodSweeper::handle_full_code_cache(bool is_full) {
jlong curr_interval = now - _last_was_full;
if (curr_interval < max_interval) {
_rescan = true;
if (PrintMethodFlushing) {
tty->print_cr("### handle full too often, turning off compiler");
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("disable_compiler flushing_interval='" UINT64_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
curr_interval/1000, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
curr_interval/1000);
return;
}
}
@ -349,17 +339,7 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
if ((!was_full()) && (is_full)) {
if (!CodeCache::needs_flushing()) {
if (PrintMethodFlushing) {
tty->print_cr("### sweeper: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes, restarting compiler",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("restart_compiler live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
log_sweep("restart_compiler");
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
return;
}
@ -368,17 +348,7 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
// Traverse the code cache trying to dump the oldest nmethods
uint curr_max_comp_id = CompileBroker::get_compilation_id();
uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
if (PrintMethodFlushing && Verbose) {
tty->print_cr("### Cleaning code cache: Live blobs:" UINT32_FORMAT "/Free code cache:" SIZE_FORMAT " bytes",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("start_cleaning_code_cache live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
log_sweep("start_cleaning");
nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
jint disconnected = 0;
@ -411,13 +381,9 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
nm = CodeCache::alive_nmethod(CodeCache::next(nm));
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("stop_cleaning_code_cache disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "' live_blobs='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
disconnected, made_not_entrant, CodeCache::nof_blobs(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
log_sweep("stop_cleaning",
"disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
disconnected, made_not_entrant);
// Shut off compiler. Sweeper will start over with a new stack scan and
// traversal cycle and turn it back on if it clears enough space.
@ -435,3 +401,38 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
}
#endif
}
// Print out some state information about the current sweep and the
// state of the code cache if it's requested.
void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
if (PrintMethodFlushing) {
ttyLocker ttyl;
tty->print("### sweeper: %s ", msg);
if (format != NULL) {
va_list ap;
va_start(ap, format);
tty->vprint(format, ap);
va_end(ap);
}
tty->print_cr(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
}
if (LogCompilation && (xtty != NULL)) {
ttyLocker ttyl;
xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
if (format != NULL) {
va_list ap;
va_start(ap, format);
xtty->vprint(format, ap);
va_end(ap);
}
xtty->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
" adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
xtty->stamp();
xtty->end_elem();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,12 +31,13 @@ class NMethodSweeper : public AllStatic {
static long _traversals; // Stack traversal count
static nmethod* _current; // Current nmethod
static int _seen; // No. of nmethods we have currently processed in current pass of CodeCache
static int _invocations; // No. of invocations left until we are completed with this pass
static volatile int _invocations; // No. of invocations left until we are completed with this pass
static volatile int _sweep_started; // Flag to control conc sweeper
static bool _rescan; // Indicates that we should do a full rescan of the
// of the code cache looking for work to do.
static bool _do_sweep; // Flag to skip the conc sweep if no stack scan happened
static jint _sweep_started; // Flag to control conc sweeper
static int _locked_seen; // Number of locked nmethods encountered during the scan
static int _not_entrant_seen_on_stack; // Number of not-entrant nmethods that are still on stack
@ -47,6 +48,9 @@ class NMethodSweeper : public AllStatic {
static long _was_full_traversal; // trav number at last emergency unloading
static void process_nmethod(nmethod *nm);
static void log_sweep(const char* msg, const char* format = NULL, ...);
public:
static long traversal_count() { return _traversals; }

View File

@ -747,6 +747,8 @@ void Thread::muxRelease (volatile intptr_t * Lock) {
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ; // protects global monitor free-list cache
static volatile int MonitorFreeCount = 0 ; // # on gFreeList
static volatile int MonitorPopulation = 0 ; // # Extant -- in circulation
@ -826,6 +828,22 @@ static void InduceScavenge (Thread * Self, const char * Whence) {
}
}
}
/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
ObjectMonitor* mid;
int inusetally = 0;
for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
inusetally ++;
}
assert(inusetally == Self->omInUseCount, "inuse count off");
int freetally = 0;
for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
freetally ++;
}
assert(freetally == Self->omFreeCount, "free count off");
}
*/
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
// A large MAXPRIVATE value reduces both list lock contention
@ -853,6 +871,9 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
m->FreeNext = Self->omInUseList;
Self->omInUseList = m;
Self->omInUseCount ++;
// verifyInUse(Self);
} else {
m->FreeNext = NULL;
}
return m ;
}
@ -874,13 +895,12 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
guarantee (take->object() == NULL, "invariant") ;
guarantee (!take->is_busy(), "invariant") ;
take->Recycle() ;
omRelease (Self, take) ;
omRelease (Self, take, false) ;
}
Thread::muxRelease (&ListLock) ;
Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
TEVENT (omFirst - reprovision) ;
continue ;
const int mx = MonitorBound ;
if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
@ -961,11 +981,34 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
// That is, *not* one-at-a-time.
void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
guarantee (m->object() == NULL, "invariant") ;
m->FreeNext = Self->omFreeList ;
Self->omFreeList = m ;
Self->omFreeCount ++ ;
// Remove from omInUseList
if (MonitorInUseLists && fromPerThreadAlloc) {
ObjectMonitor* curmidinuse = NULL;
for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
if (m == mid) {
// extract from per-thread in-use-list
if (mid == Self->omInUseList) {
Self->omInUseList = mid->FreeNext;
} else if (curmidinuse != NULL) {
curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
}
Self->omInUseCount --;
// verifyInUse(Self);
break;
} else {
curmidinuse = mid;
mid = mid->FreeNext;
}
}
}
// FreeNext is used for both onInUseList and omFreeList, so clear old before setting new
m->FreeNext = Self->omFreeList ;
Self->omFreeList = m ;
Self->omFreeCount ++ ;
}
// Return the monitors of a moribund thread's local free list to
@ -975,6 +1018,10 @@ void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
@ -987,24 +1034,50 @@ void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m) {
void ObjectSynchronizer::omFlush (Thread * Self) {
ObjectMonitor * List = Self->omFreeList ; // Null-terminated SLL
Self->omFreeList = NULL ;
if (List == NULL) return ;
ObjectMonitor * Tail = NULL ;
ObjectMonitor * s ;
int Tally = 0;
for (s = List ; s != NULL ; s = s->FreeNext) {
Tally ++ ;
Tail = s ;
guarantee (s->object() == NULL, "invariant") ;
guarantee (!s->is_busy(), "invariant") ;
s->set_owner (NULL) ; // redundant but good hygiene
TEVENT (omFlush - Move one) ;
if (List != NULL) {
ObjectMonitor * s ;
for (s = List ; s != NULL ; s = s->FreeNext) {
Tally ++ ;
Tail = s ;
guarantee (s->object() == NULL, "invariant") ;
guarantee (!s->is_busy(), "invariant") ;
s->set_owner (NULL) ; // redundant but good hygiene
TEVENT (omFlush - Move one) ;
}
guarantee (Tail != NULL && List != NULL, "invariant") ;
}
ObjectMonitor * InUseList = Self->omInUseList;
ObjectMonitor * InUseTail = NULL ;
int InUseTally = 0;
if (InUseList != NULL) {
Self->omInUseList = NULL;
ObjectMonitor *curom;
for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
InUseTail = curom;
InUseTally++;
}
// TODO debug
assert(Self->omInUseCount == InUseTally, "inuse count off");
Self->omInUseCount = 0;
guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
}
guarantee (Tail != NULL && List != NULL, "invariant") ;
Thread::muxAcquire (&ListLock, "omFlush") ;
Tail->FreeNext = gFreeList ;
gFreeList = List ;
MonitorFreeCount += Tally;
if (Tail != NULL) {
Tail->FreeNext = gFreeList ;
gFreeList = List ;
MonitorFreeCount += Tally;
}
if (InUseTail != NULL) {
InUseTail->FreeNext = gOmInUseList;
gOmInUseList = InUseList;
gOmInUseCount += InUseTally;
}
Thread::muxRelease (&ListLock) ;
TEVENT (omFlush) ;
}
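The tail end of omFlush is the standard O(1) splice of a null-terminated singly linked list onto a global one: a single walk finds the tail (tallying nodes on the way), then, under ListLock, the tail is pointed at the current global head and the local head becomes the new global head. The shape of the splice, with hypothetical types; the caller is assumed to hold the lock:

struct MonitorNode { MonitorNode* FreeNext; };

void splice_under_lock(MonitorNode*& global_head, int& global_count,
                       MonitorNode* local_head, MonitorNode* local_tail,
                       int tally) {
  if (local_head == nullptr) return;    // nothing to flush
  local_tail->FreeNext = global_head;   // old global list hangs off our tail
  global_head = local_head;             // local list becomes the new prefix
  global_count += tally;
}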
@ -1166,7 +1239,6 @@ ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
// We do this before the CAS in order to minimize the length of time
// in which INFLATING appears in the mark.
m->Recycle();
m->FreeNext = NULL ;
m->_Responsible = NULL ;
m->OwnerIsThread = 0 ;
m->_recursions = 0 ;
@ -1174,7 +1246,7 @@ ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
if (cmp != mark) {
omRelease (Self, m) ;
omRelease (Self, m, true) ;
continue ; // Interference -- just retry
}
@ -1262,7 +1334,6 @@ ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
m->set_object(object);
m->OwnerIsThread = 1 ;
m->_recursions = 0 ;
m->FreeNext = NULL ;
m->_Responsible = NULL ;
m->_SpinDuration = Knob_SpinLimit ; // consider: keep metastats by type/class
@ -1271,7 +1342,7 @@ ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
m->set_owner (NULL) ;
m->OwnerIsThread = 0 ;
m->Recycle() ;
omRelease (Self, m) ;
omRelease (Self, m, true) ;
m = NULL ;
continue ;
// interference - the markword changed - just retry.
@ -1852,6 +1923,10 @@ void ObjectSynchronizer::oops_do(OopClosure* f) {
// only scans the per-thread inuse lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList while holding the
// global list lock. deflate_idle_monitors() takes the same lock to
// scan that list and move non-busy monitors to the global free list.
// An alternative could have used a single global inuse list. The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
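// [Sketch] The trade-off described above, in miniature. Names are
// illustrative (Monitor, ThreadLocalCache, refill_from_global), not HotSpot
// code: with per-thread lists the common allocation path takes no lock,
// which a single global in-use list would force on every omAlloc().

struct Monitor { Monitor* next; };
struct ThreadLocalCache { Monitor* free_list; };

Monitor* refill_from_global(ThreadLocalCache* self);  // slow path: takes the global lock

Monitor* alloc(ThreadLocalCache* self) {
  if (Monitor* m = self->free_list) {   // fast path: lock-free pop
    self->free_list = m->next;
    return m;
  }
  return refill_from_global(self);      // rare refill under the global lock
}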
@ -1904,6 +1979,7 @@ bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
if (*FreeHeadp == NULL) *FreeHeadp = mid;
if (*FreeTailp != NULL) {
ObjectMonitor * prevtail = *FreeTailp;
assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
prevtail->FreeNext = mid;
}
*FreeTailp = mid;
@ -1912,6 +1988,39 @@ bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
return deflated;
}
// Caller must acquire and hold ListLock around this call.
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
ObjectMonitor* mid;
ObjectMonitor* next;
ObjectMonitor* curmidinuse = NULL;
int deflatedcount = 0;
for (mid = *listheadp; mid != NULL; ) {
oop obj = (oop) mid->object();
bool deflated = false;
if (obj != NULL) {
deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
}
if (deflated) {
// extract from per-thread in-use-list
if (mid == *listheadp) {
*listheadp = mid->FreeNext;
} else if (curmidinuse != NULL) {
curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
}
next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is now the tail of the FreeHead list
mid = next;
deflatedcount++;
} else {
curmidinuse = mid;
mid = mid->FreeNext;
}
}
return deflatedcount;
}
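// [Sketch] Hypothetical caller shape for walk_monitor_list(), mirroring
// deflate_idle_monitors() below -- ListLock must already be held around
// the walk:
//
//   Thread::muxAcquire(&ListLock, "scavenge");
//   int n = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
//   cur->omInUseCount -= n;   // deflated monitors left the in-use list
//   Thread::muxRelease(&ListLock);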
void ObjectSynchronizer::deflate_idle_monitors() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
int nInuse = 0 ; // currently associated with objects
@ -1929,36 +2038,25 @@ void ObjectSynchronizer::deflate_idle_monitors() {
Thread::muxAcquire (&ListLock, "scavenge - return") ;
if (MonitorInUseLists) {
ObjectMonitor* mid;
ObjectMonitor* next;
ObjectMonitor* curmidinuse;
int inUse = 0;
for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
curmidinuse = NULL;
for (mid = cur->omInUseList; mid != NULL; ) {
oop obj = (oop) mid->object();
deflated = false;
if (obj != NULL) {
deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);
}
if (deflated) {
// extract from per-thread in-use-list
if (mid == cur->omInUseList) {
cur->omInUseList = mid->FreeNext;
} else if (curmidinuse != NULL) {
curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
}
next = mid->FreeNext;
mid->FreeNext = NULL; // This mid is current tail in the FreeHead list
mid = next;
cur->omInUseCount--;
nScavenged ++ ;
} else {
curmidinuse = mid;
mid = mid->FreeNext;
nInuse ++;
}
nInCirculation+= cur->omInUseCount;
int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
cur->omInUseCount -= deflatedcount;
// verifyInUse(cur);
nScavenged += deflatedcount;
nInuse += cur->omInUseCount;
}
}
// For moribund threads, scan gOmInUseList
if (gOmInUseList) {
nInCirculation += gOmInUseCount;
int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
gOmInUseCount -= deflatedcount;
nScavenged += deflatedcount;
nInuse += gOmInUseCount;
}
} else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
// Iterate over all extant monitors - Scavenge all idle monitors.
assert(block->object() == CHAINMARKER, "must be a block header");

View File

@ -122,8 +122,9 @@ class ObjectSynchronizer : AllStatic {
static void reenter (Handle obj, intptr_t recursion, TRAPS);
// thread-specific and global objectMonitor free list accessors
// static void verifyInUse (Thread * Self) ; too slow for general assert/debug
static ObjectMonitor * omAlloc (Thread * Self) ;
static void omRelease (Thread * Self, ObjectMonitor * m) ;
static void omRelease (Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc) ;
static void omFlush (Thread * Self) ;
// Inflate light weight monitor to heavy weight monitor
@ -150,6 +151,9 @@ class ObjectSynchronizer : AllStatic {
// Basically we deflate all monitors that are not busy.
// An adaptive profile-based deflation policy could be used if needed
static void deflate_idle_monitors();
static int walk_monitor_list(ObjectMonitor** listheadp,
ObjectMonitor** FreeHeadp,
ObjectMonitor** FreeTailp);
static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
ObjectMonitor** FreeTailp);
static void oops_do(OopClosure* f);
@ -163,6 +167,8 @@ class ObjectSynchronizer : AllStatic {
enum { _BLOCKSIZE = 128 };
static ObjectMonitor* gBlockList;
static ObjectMonitor * volatile gFreeList;
static ObjectMonitor * volatile gOmInUseList; // for moribund threads, so monitors they inflated still get scanned
static int gOmInUseCount;
public:
static void Initialize () ;

View File

@ -270,6 +270,7 @@ class Thread: public ThreadShadow {
static void interrupt(Thread* thr);
static bool is_interrupted(Thread* thr, bool clear_interrupted);
ObjectMonitor** omInUseList_addr() { return (ObjectMonitor **)&omInUseList; }
Monitor* SR_lock() const { return _SR_lock; }
bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; }

View File

@ -614,7 +614,6 @@ static inline uint64_t cast_uint64_t(size_t x)
/* NMethods (NOTE: incomplete, but only a little) */ \
/**************************************************/ \
\
static_field(nmethod, _zombie_instruction_size, int) \
nonstatic_field(nmethod, _method, methodOop) \
nonstatic_field(nmethod, _entry_bci, int) \
nonstatic_field(nmethod, _osr_link, nmethod*) \

View File

@ -1,55 +0,0 @@
/*
* Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// Interface to Intel's VTune profiler.
class VTune : AllStatic {
public:
static void create_nmethod(nmethod* nm); // register newly created nmethod
static void delete_nmethod(nmethod* nm); // unregister nmethod before discarding it
static void register_stub(const char* name, address start, address end);
// register internal VM stub
static void start_GC(); // start/end of GC or scavenge
static void end_GC();
static void start_class_load(); // start/end of class loading
static void end_class_load();
static void exit(); // VM exit
};
// helper objects
class VTuneGCMarker : StackObj {
public:
VTuneGCMarker() { VTune::start_GC(); }
~VTuneGCMarker() { VTune::end_GC(); }
};
class VTuneClassLoadMarker : StackObj {
public:
VTuneClassLoadMarker() { VTune::start_class_load(); }
~VTuneClassLoadMarker() { VTune::end_class_load(); }
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -182,73 +182,3 @@ bool ObjArrayTask::is_valid() const {
_index < objArrayOop(_obj)->length();
}
#endif // ASSERT
bool RegionTaskQueueWithOverflow::is_empty() {
return (_region_queue.size() == 0) &&
(_overflow_stack->length() == 0);
}
bool RegionTaskQueueWithOverflow::stealable_is_empty() {
return _region_queue.size() == 0;
}
bool RegionTaskQueueWithOverflow::overflow_is_empty() {
return _overflow_stack->length() == 0;
}
void RegionTaskQueueWithOverflow::initialize() {
_region_queue.initialize();
assert(_overflow_stack == 0, "Creating memory leak");
_overflow_stack =
new (ResourceObj::C_HEAP) GrowableArray<RegionTask>(10, true);
}
void RegionTaskQueueWithOverflow::save(RegionTask t) {
if (TraceRegionTasksQueuing && Verbose) {
gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
}
if(!_region_queue.push(t)) {
_overflow_stack->push(t);
}
}
// Note that using this method will retrieve all regions
// that have been saved but that it will always check
// the overflow stack. It may be more efficient to
// check the stealable queue and the overflow stack
// separately.
bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
bool result = retrieve_from_overflow(region_task);
if (!result) {
result = retrieve_from_stealable_queue(region_task);
}
if (TraceRegionTasksQueuing && Verbose && result) {
gclog_or_tty->print_cr(" CTQ: retrieve " PTR_FORMAT, result);
}
return result;
}
bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(
RegionTask& region_task) {
bool result = _region_queue.pop_local(region_task);
if (TraceRegionTasksQueuing && Verbose) {
gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
}
return result;
}
bool
RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
bool result;
if (!_overflow_stack->is_empty()) {
region_task = _overflow_stack->pop();
result = true;
} else {
region_task = (RegionTask) NULL;
result = false;
}
if (TraceRegionTasksQueuing && Verbose) {
gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
}
return result;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -109,8 +109,9 @@ protected:
public:
TaskQueueSuper() : _bottom(0), _age() {}
// Return true if the TaskQueue contains any tasks.
bool peek() { return _bottom != _age.top(); }
// peek() returns true if the TaskQueue contains any tasks;
// is_empty() returns true if it contains none.
bool peek() const { return _bottom != _age.top(); }
bool is_empty() const { return size() == 0; }
// Return an estimate of the number of elements in the queue.
// The "careful" version admits the possibility of pop_local/pop_global
@ -165,18 +166,16 @@ public:
void initialize();
// Push the task "t" on the queue. Returns "false" iff the queue is
// full.
// Push the task "t" on the queue. Returns "false" iff the queue is full.
inline bool push(E t);
// If succeeds in claiming a task (from the 'local' end, that is, the
// most recently pushed task), returns "true" and sets "t" to that task.
// Otherwise, the queue is empty and returns false.
// Attempts to claim a task from the "local" end of the queue (the most
// recently pushed). If successful, returns true and sets t to the task;
// otherwise, returns false (the queue is empty).
inline bool pop_local(E& t);
// If succeeds in claiming a task (from the 'global' end, that is, the
// least recently pushed task), returns "true" and sets "t" to that task.
// Otherwise, the queue is empty and returns false.
// Like pop_local(), but uses the "global" end of the queue (the least
// recently pushed).
bool pop_global(E& t);
// Delete any resource associated with the queue.
@ -198,7 +197,6 @@ GenericTaskQueue<E, N>::GenericTaskQueue() {
template<class E, unsigned int N>
void GenericTaskQueue<E, N>::initialize() {
_elems = NEW_C_HEAP_ARRAY(E, N);
guarantee(_elems != NULL, "Allocation failed.");
}
template<class E, unsigned int N>
@ -289,7 +287,87 @@ GenericTaskQueue<E, N>::~GenericTaskQueue() {
FREE_C_HEAP_ARRAY(E, _elems);
}
// Inherits the typedef of "Task" from above.
// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// Three methods from super classes are overridden:
//
// initialize() - initialize the super classes and create the overflow stack
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not overridden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack. This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
typedef GrowableArray<E> overflow_t;
typedef GenericTaskQueue<E, N> taskqueue_t;
OverflowTaskQueue();
~OverflowTaskQueue();
void initialize();
inline overflow_t* overflow_stack() const { return _overflow_stack; }
// Push task t onto the queue or onto the overflow stack. Return true.
inline bool push(E t);
// Attempt to pop from the overflow stack; return true if anything was popped.
inline bool pop_overflow(E& t);
inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
inline bool overflow_empty() const { return overflow_stack()->is_empty(); }
inline bool is_empty() const {
return taskqueue_empty() && overflow_empty();
}
private:
overflow_t* _overflow_stack;
};
template <class E, unsigned int N>
OverflowTaskQueue<E, N>::OverflowTaskQueue()
{
_overflow_stack = NULL;
}
template <class E, unsigned int N>
OverflowTaskQueue<E, N>::~OverflowTaskQueue()
{
if (_overflow_stack != NULL) {
delete _overflow_stack;
_overflow_stack = NULL;
}
}
template <class E, unsigned int N>
void OverflowTaskQueue<E, N>::initialize()
{
taskqueue_t::initialize();
assert(_overflow_stack == NULL, "memory leak");
_overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
}
template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
if (!taskqueue_t::push(t)) {
overflow_stack()->push(t);
}
return true;
}
template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
{
if (overflow_empty()) return false;
t = overflow_stack()->pop();
return true;
}
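// [Sketch] A hypothetical drain loop over the OverflowTaskQueue above:
// reclaim overflowed elements first, then drain the bounded queue through
// pop_local() inherited from GenericTaskQueue. 'process' is an assumed
// consumer, not part of this interface.

template <class E> void process(const E& t);   // assumed consumer

template <class E, unsigned int N>
void drain(OverflowTaskQueue<E, N>& q) {
  E t;
  while (q.pop_overflow(t) || q.pop_local(t)) {
    process(t);
  }
}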
class TaskQueueSetSuper: public CHeapObj {
protected:
static int randomParkAndMiller(int* seed0);
@ -323,11 +401,11 @@ public:
T* queue(uint n);
// The thread with queue number "queue_num" (and whose random number seed
// is at "seed") is trying to steal a task from some other queue. (It
// may try several queues, according to some configuration parameter.)
// If some steal succeeds, returns "true" and sets "t" the stolen task,
// otherwise returns false.
// The thread with queue number "queue_num" (and whose random number seed is
// at "seed") is trying to steal a task from some other queue. (It may try
// several queues, according to some configuration parameter.) If some steal
// succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
// false.
bool steal(uint queue_num, int* seed, E& t);
bool peek();
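// [Sketch] Typical work-stealing driver built on steal(), with hypothetical
// names (my_queue, queue_set, worker_id, do_task): a worker drains its own
// queue, then tries to steal from peers before terminating.
//
//   E t;
//   int seed = 17 * (worker_id + 1);   // per-worker random seed
//   while (my_queue->pop_local(t) ||
//          queue_set->steal(worker_id, &seed, t)) {
//     do_task(t);
//   }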
@ -507,7 +585,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
uint localBot = _bottom;
// This value cannot be N-1. That can only occur as a result of
// the assignment to bottom in this method. If it does, this method
// resets the size( to 0 before the next call (which is sequential,
// resets the size to 0 before the next call (which is sequential,
// since this is pop_local.)
uint dirty_n_elems = dirty_size(localBot, _age.top());
assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
@ -533,8 +611,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
}
}
typedef oop Task;
typedef GenericTaskQueue<Task> OopTaskQueue;
typedef GenericTaskQueue<oop> OopTaskQueue;
typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet;
#ifdef _MSC_VER
@ -615,35 +692,8 @@ private:
#pragma warning(pop)
#endif
typedef GenericTaskQueue<StarTask> OopStarTaskQueue;
typedef OverflowTaskQueue<StarTask> OopStarTaskQueue;
typedef GenericTaskQueueSet<OopStarTaskQueue> OopStarTaskQueueSet;
typedef size_t RegionTask; // index for region
typedef GenericTaskQueue<RegionTask> RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;
class RegionTaskQueueWithOverflow: public CHeapObj {
protected:
RegionTaskQueue _region_queue;
GrowableArray<RegionTask>* _overflow_stack;
public:
RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
// Initialize both stealable queue and overflow
void initialize();
// Save first to stealable queue and then to overflow
void save(RegionTask t);
// Retrieve first from overflow and then from stealable queue
bool retrieve(RegionTask& region_index);
// Retrieve from stealable queue
bool retrieve_from_stealable_queue(RegionTask& region_index);
// Retrieve from overflow
bool retrieve_from_overflow(RegionTask& region_index);
bool is_empty();
bool stealable_is_empty();
bool overflow_is_empty();
uint stealable_size() { return _region_queue.size(); }
RegionTaskQueue* task_queue() { return &_region_queue; }
};
#define USE_RegionTaskQueueWithOverflow
typedef OverflowTaskQueue<size_t> RegionTaskQueue;
typedef GenericTaskQueueSet<RegionTaskQueue> RegionTaskQueueSet;