Lana Steuck 2010-11-13 18:39:26 -08:00
commit cd33310431
675 changed files with 32112 additions and 18133 deletions

View File

@ -91,3 +91,5 @@ f960f117f1623629f64203e2b09a92a8f6f14ff5 jdk7-b112
750c1ccb2f2d1ddfa95ab6c7f897fdab2f87f7e9 jdk7-b114
9cb24917216bc68997154f6e9566c3de62acb2f4 jdk7-b115
a4e6aa1f45ad23a6f083ed98d970b5006ea4d292 jdk7-b116
228e73f288c543a8c34e2a54227103ae5649e6af jdk7-b117
2e876e59938a853934aa738c811b26c452bd9fe8 jdk7-b118

View File

@ -91,3 +91,5 @@ c1df968c4527bfab5f97662a89245f15d12d378b jdk7-b113
27985a5c6e5268014d25d55886e0ecb96af4763d jdk7-b114
e8ebdf41b9c01a26642848f4134f5504e8fb3233 jdk7-b115
94e9a1bfba8b8d1fe0bfd43b88629b1f27b02a76 jdk7-b116
7220e60b097fa027e922f1aeecdd330f3e37409f jdk7-b117
a12a9e78df8a9d534da0b4a244ed68f0de0bd58e jdk7-b118

View File

@ -91,3 +91,5 @@ a89a6c5be9d1a754868d3d359cbf7ad36aa95631 jdk7-b113
88fddb73c5c4a4b50c319cbae9380caf5172ab45 jdk7-b114
da7561d479e0ddaa4650d8023ac0fc7294e014e3 jdk7-b115
98c028de4301106f2285ac0e128a1bb9b4c24f5c jdk7-b116
fa502e4834dac2176499cc1f44794d5dc32a11b9 jdk7-b117
42e77836fded7c2a3080d27316b96634ea9e33c6 jdk7-b118

View File

@ -127,3 +127,7 @@ beef35b96b81129c375d572357fb9548d9020db1 jdk7-b113
5511edd5d719f3fc9fdd04879482026a3d2c8652 hs20-b01
bdbc48857210a509b3c50a3291ecb9dd6a72e016 jdk7-b115
96b3f2a7add0b445b8aa421f6823cff5a2e2fe03 jdk7-b116
52f19c724d9634af79044a2e0defbe4a5f1adbda hs20-b02
806d0c037e6bbb88dac0699673f4ba55ee8c02da jdk7-b117
698b7b727e12de44139d8cca6ab9a494ead13253 jdk7-b118
3ef7426b4deac5dcfd4afb35cabe9ab3d666df91 hs20-b02

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -188,7 +188,7 @@ public class BytecodeLoadConstant extends BytecodeWithCPIndex {
} else {
throw new RuntimeException("should not reach here");
}
} else if (ctag.isMethodHandle() || ctag.isMethodType()) {
} else if (ctag.isMethodHandle()) {
Oop x = getCachedConstant();
int refidx = cpool.getMethodHandleIndexAt(cpIndex);
int refkind = cpool.getMethodHandleRefKindAt(cpIndex);

View File

@ -53,11 +53,19 @@ public class ConstantPool extends Oop implements ClassConstants {
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("constantPoolOopDesc");
tags = new OopField(type.getOopField("_tags"), 0);
operands = new OopField(type.getOopField("_operands"), 0);
cache = new OopField(type.getOopField("_cache"), 0);
poolHolder = new OopField(type.getOopField("_pool_holder"), 0);
length = new CIntField(type.getCIntegerField("_length"), 0);
headerSize = type.getSize();
elementSize = 0;
// fetch constants:
MULTI_OPERAND_COUNT_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_multi_operand_count_offset").intValue();
MULTI_OPERAND_BASE_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_multi_operand_base_offset").intValue();
INDY_BSM_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_bsm_offset").intValue();
INDY_NT_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_nt_offset").intValue();
INDY_ARGC_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_argc_offset").intValue();
INDY_ARGV_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_argv_offset").intValue();
}
ConstantPool(OopHandle handle, ObjectHeap heap) {
@ -67,6 +75,7 @@ public class ConstantPool extends Oop implements ClassConstants {
public boolean isConstantPool() { return true; }
private static OopField tags;
private static OopField operands;
private static OopField cache;
private static OopField poolHolder;
private static CIntField length; // number of elements in oop
@ -74,7 +83,15 @@ public class ConstantPool extends Oop implements ClassConstants {
private static long headerSize;
private static long elementSize;
private static int MULTI_OPERAND_COUNT_OFFSET;
private static int MULTI_OPERAND_BASE_OFFSET;
private static int INDY_BSM_OFFSET;
private static int INDY_NT_OFFSET;
private static int INDY_ARGC_OFFSET;
private static int INDY_ARGV_OFFSET;
public TypeArray getTags() { return (TypeArray) tags.getValue(this); }
public TypeArray getOperands() { return (TypeArray) operands.getValue(this); }
public ConstantPoolCache getCache() { return (ConstantPoolCache) cache.getValue(this); }
public Klass getPoolHolder() { return (Klass) poolHolder.getValue(this); }
public int getLength() { return (int)length.getValue(this); }
@ -278,6 +295,25 @@ public class ConstantPool extends Oop implements ClassConstants {
return res;
}
/** Lookup for multi-operand (InvokeDynamic) entries. */
public int[] getMultiOperandsAt(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isInvokeDynamic(), "Corrupted constant pool");
}
int pos = this.getIntAt(i);
int countPos = pos + MULTI_OPERAND_COUNT_OFFSET; // == pos-1
int basePos = pos + MULTI_OPERAND_BASE_OFFSET; // == pos
if (countPos < 0) return null; // safety first
TypeArray operands = getOperands();
if (operands == null) return null; // safety first
int length = operands.getIntAt(countPos);
int[] values = new int[length];
for (int j = 0; j < length; j++) {
values[j] = operands.getIntAt(basePos+j);
}
return values;
}
final private static String[] nameForTag = new String[] {
};
@ -522,15 +558,20 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int value = getIntAt(ci);
short bootstrapMethodIndex = (short) extractLowShortFromInt(value);
short nameAndTypeIndex = (short) extractHighShortFromInt(value);
dos.writeShort(bootstrapMethodIndex);
dos.writeShort(nameAndTypeIndex);
int[] values = getMultiOperandsAt(ci);
for (int vn = 0; vn < values.length; vn++) {
dos.writeShort(values[vn]);
}
int bootstrapMethodIndex = values[INDY_BSM_OFFSET];
int nameAndTypeIndex = values[INDY_NT_OFFSET];
int argumentCount = values[INDY_ARGC_OFFSET];
assert(INDY_ARGV_OFFSET + argumentCount == values.length);
if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bootstrapMethodIndex
+ ", N&T = " + nameAndTypeIndex);
+ ", N&T = " + nameAndTypeIndex
+ ", argc = " + argumentCount);
break;
}
default:
throw new InternalError("unknown tag: " + cpConstType);
} // switch
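
Taken together, the hunks above replace the packed-int encoding of CONSTANT_InvokeDynamic with a variable-length operand vector. A hedged consumer sketch, written as if inside ConstantPool where the INDY_* offsets fetched in initialize() are visible (the helper name and the printing are illustrative):

// "ci" is the constant-pool index of a CONSTANT_InvokeDynamic entry.
void dumpIndyOperands(int ci) {
    int[] ops = getMultiOperandsAt(ci);          // {bsm, nameAndType, argc, arg0, arg1, ...}
    int bsm         = ops[INDY_BSM_OFFSET];      // bootstrap method
    int nameAndType = ops[INDY_NT_OFFSET];       // CONSTANT_NameAndType index
    int argc        = ops[INDY_ARGC_OFFSET];     // number of static arguments
    System.out.println("BSM = #" + bsm + ", N&T = #" + nameAndType);
    for (int k = 0; k < argc; k++) {             // INDY_ARGV_OFFSET + argc == ops.length
        System.out.println("arg[" + k + "] = #" + ops[INDY_ARGV_OFFSET + k]);
    }
}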

View File

@ -42,7 +42,8 @@ public interface ClassConstants
public static final int JVM_CONSTANT_NameAndType = 12;
public static final int JVM_CONSTANT_MethodHandle = 15;
public static final int JVM_CONSTANT_MethodType = 16;
public static final int JVM_CONSTANT_InvokeDynamic = 17;
public static final int JVM_CONSTANT_InvokeDynamicTrans = 17; // only occurs in old class files
public static final int JVM_CONSTANT_InvokeDynamic = 18;
// JVM_CONSTANT_MethodHandle subtypes
public static final int JVM_REF_getField = 1;
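
The renumbering matters when reading both generations of class files. Assuming the transitional form carried the same u2+u2 payload as the final one, a hedged parser sketch that accepts either tag in one place (the helper and its stream parameter are illustrative):

static void readInvokeDynamicInfo(java.io.DataInput in) throws java.io.IOException {
    int bsm = in.readUnsignedShort(); // bootstrap method (attribute index; a direct CP index in the old form)
    int nat = in.readUnsignedShort(); // CONSTANT_NameAndType index
}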

View File

@ -303,12 +303,12 @@ public class ClassWriter implements /* imports */ ClassConstants
case JVM_CONSTANT_MethodHandle: {
dos.writeByte(cpConstType);
int value = cpool.getIntAt(ci);
short bootstrapMethodIndex = (short) extractLowShortFromInt(value);
short nameAndTypeIndex = (short) extractHighShortFromInt(value);
dos.writeShort(bootstrapMethodIndex);
dos.writeShort(nameAndTypeIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " +
bootstrapMethodIndex + ", N&T = " + nameAndTypeIndex);
byte refKind = (byte) extractLowShortFromInt(value);
short memberIndex = (short) extractHighShortFromInt(value);
dos.writeByte(refKind);
dos.writeShort(memberIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " +
refKind + ", mem = " + memberIndex);
break;
}
@ -323,10 +323,11 @@ public class ClassWriter implements /* imports */ ClassConstants
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int value = cpool.getIntAt(ci);
short refIndex = (short) value;
dos.writeShort(refIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = MT index = " + refIndex);
int[] values = cpool.getMultiOperandsAt(ci);
for (int vn = 0; vn < values.length; vn++) {
dos.writeShort(values[vn]);
}
if (DEBUG) debugMessage("CP[" + ci + "] = INDY indexes = " + Arrays.toString(values));
break;
}
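
The first hunk corrects the MethodHandle case, which previously wrote the entry as two shorts; CONSTANT_MethodHandle_info is u1 tag, u1 reference_kind, u2 reference_index in the class-file format. A hedged reader-side counterpart (helper name illustrative):

static int[] readMethodHandleInfo(java.io.DataInput dis) throws java.io.IOException {
    int refKind     = dis.readUnsignedByte();  // one of the JVM_REF_* kinds (1..9)
    int memberIndex = dis.readUnsignedShort(); // CP index of the referenced Fieldref/Methodref
    return new int[] { refKind, memberIndex };
}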

View File

@ -460,6 +460,18 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
return buf.toString();
}
private String genListOfShort(int[] values) {
Formatter buf = new Formatter(genHTML);
buf.append('[');
for (int i = 0; i < values.length; i++) {
if (i > 0) buf.append(' ');
buf.append('#');
buf.append(Integer.toString(values[i]));
}
buf.append(']');
return buf.toString();
}
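
As a worked example: for an InvokeDynamic operand vector {3, 15, 1, 27}, genListOfShort renders "[#3 #15 #1 #27]", as used for the InvokeDynamic row added below.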
protected String genHTMLTableForConstantPool(ConstantPool cpool) {
Formatter buf = new Formatter(genHTML);
buf.beginTable(1);
@ -584,7 +596,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
case JVM_CONSTANT_InvokeDynamic:
buf.cell("JVM_CONSTANT_InvokeDynamic");
buf.cell(genLowHighShort(cpool.getIntAt(index)));
buf.cell(genListOfShort(cpool.getMultiOperandsAt(index)));
break;
default:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2005, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,8 @@ public class ConstantTag {
private static int JVM_CONSTANT_NameAndType = 12;
private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292
private static int JVM_CONSTANT_MethodType = 16; // JSR 292
private static int JVM_CONSTANT_InvokeDynamic = 17; // JSR 292
// static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files
private static int JVM_CONSTANT_InvokeDynamic = 18; // JSR 292
private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization
private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use
private static int JVM_CONSTANT_ClassIndex = 101; // Temporary tag while constructing constant pool

View File

@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2010
HS_MAJOR_VER=20
HS_MINOR_VER=0
HS_BUILD_NUMBER=02
HS_BUILD_NUMBER=03
JDK_MAJOR_VER=1
JDK_MINOR_VER=7

View File

@ -62,7 +62,9 @@ endif
include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
ifndef CC_INTERP
FORCE_TIERED=1
ifndef FORCE_TIERED
FORCE_TIERED=1
endif
endif
ifdef LP64
@ -254,7 +256,7 @@ $(SUBDIRS_TIERED): $(BUILDTREE_MAKE)
$(BUILDTREE) VARIANT=tiered
$(SUBDIRS_C2): $(BUILDTREE_MAKE)
ifdef FORCE_TIERED
ifeq ($(FORCE_TIERED),1)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=tiered FORCE_TIERED=1
else

View File

@ -53,7 +53,9 @@ endif
include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
ifndef CC_INTERP
FORCE_TIERED=1
ifndef FORCE_TIERED
FORCE_TIERED=1
endif
endif
ifdef LP64
@ -210,7 +212,7 @@ $(SUBDIRS_TIERED): $(BUILDTREE_MAKE)
$(BUILDTREE) VARIANT=tiered
$(SUBDIRS_C2): $(BUILDTREE_MAKE)
ifdef FORCE_TIERED
ifeq ($(FORCE_TIERED),1)
$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
$(BUILDTREE) VARIANT=tiered FORCE_TIERED=1
else

View File

@ -74,9 +74,11 @@ BUILDARCH=ia64
!if "$(BUILDARCH)" != "ia64"
!ifndef CC_INTERP
!ifndef FORCE_TIERED
FORCE_TIERED=1
!endif
!endif
!endif
!if "$(BUILDARCH)" == "amd64"
Platform_arch=x86
@ -100,7 +102,7 @@ VARIANT_TEXT=Core
!if "$(Variant)" == "compiler1"
VARIANT_TEXT=Client
!elseif "$(Variant)" == "compiler2"
!ifdef FORCE_TIERED
!if "$(FORCE_TIERED)" == "1"
VARIANT_TEXT=Server
realVariant=tiered
!else

View File

@ -1126,7 +1126,7 @@ public:
inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype = relocInfo::none);
inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
inline void add(const Address& a, Register d, int offset = 0) { add( a.base(), a.disp() + offset, d, a.rspec(offset)); }
inline void add(const Address& a, Register d, int offset = 0);
void addcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
void addcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(add_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -206,6 +206,11 @@ inline void Assembler::ld( Register s1, RegisterOrConstant s2, Register d) { ld
inline void Assembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }
// form effective addresses this way:
inline void Assembler::add(const Address& a, Register d, int offset) {
if (a.has_index()) add(a.base(), a.index(), d);
else { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
if (offset != 0) add(d, offset, d);
}
inline void Assembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) add(s1, s2.as_register(), d);
else { add(s1, s2.as_constant() + offset, d); offset = 0; }

View File

@ -664,7 +664,7 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
// Use temps to avoid kills
LIR_Opr t1 = FrameMap::G1_opr;
LIR_Opr t2 = FrameMap::G3_opr;
LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
LIR_Opr addr = new_pointer_register();
// get address of field
obj.load_item();

View File

@ -62,3 +62,5 @@ define_pd_global(intx, PreInflateSpin, 40); // Determined by running desi
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);

View File

@ -70,17 +70,29 @@ MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _mas
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
// I5_savedSP: sender SP (must preserve)
// I5_savedSP/O5_savedSP: sender SP (must preserve)
// G4 (Gargs): incoming argument list (must preserve)
// G5_method: invoke methodOop; becomes method type.
// G5_method: invoke methodOop
// G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
// O0, O1: garbage temps, blown away
Register O0_argslot = O0;
// O0, O1, O2, O3, O4: garbage temps, blown away
Register O0_mtype = O0;
Register O1_scratch = O1;
Register O2_scratch = O2;
Register O3_scratch = O3;
Register O4_argslot = O4;
Register O4_argbase = O4;
// emit WrongMethodType path first, to enable back-branch from main path
Label wrong_method_type;
__ bind(wrong_method_type);
Label invoke_generic_slow_path;
assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
__ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
__ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
__ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
__ delayed()->nop();
__ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType
// mov(G3_method_handle, G3_method_handle); // already in this register
__ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
__ delayed()->nop();
@ -88,23 +100,74 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
__ align(CodeEntryAlignment);
address entry_point = __ pc();
// fetch the MethodType from the method handle into G5_method_type
// fetch the MethodType from the method handle
{
Register tem = G5_method;
assert(tem == G5_method_type, "yes, it's the same register");
for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
__ ld_ptr(Address(tem, *pchase), G5_method_type);
__ ld_ptr(Address(tem, *pchase), O0_mtype);
tem = O0_mtype; // in case there is another indirection
}
}
// given the MethodType, find out where the MH argument is buried
__ load_heap_oop(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O0_argslot);
__ ldsw( Address(O0_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
__ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
__ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O4_argslot);
__ ldsw( Address(O4_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
__ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
// Note: argument_address uses its input as a scratch register!
__ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);
__ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
trace_method_handle(_masm, "invokeExact");
__ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
// for invokeGeneric (only), apply argument and result conversions on the fly
__ bind(invoke_generic_slow_path);
#ifdef ASSERT
{ Label L;
__ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
__ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
__ brx(Assembler::equal, false, Assembler::pt, L);
__ delayed()->nop();
__ stop("bad methodOop::intrinsic_id");
__ bind(L);
}
#endif //ASSERT
// make room on the stack for another pointer:
insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
// load up an adapter from the calling type (Java weaves this)
Register O2_form = O2_scratch;
Register O3_adapter = O3_scratch;
__ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)), O2_form);
// load_heap_oop(Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
// deal with old JDK versions:
__ add( Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
__ cmp(O3_adapter, O2_form);
Label sorry_no_invoke_generic;
__ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic);
__ delayed()->nop();
__ load_heap_oop(Address(O3_adapter, 0), O3_adapter);
__ tst(O3_adapter);
__ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic);
__ delayed()->nop();
__ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
// As a trusted first argument, pass the type being called, so the adapter knows
// the actual types of the arguments and return values.
// (Generic invokers are shared among form-families of method-type.)
__ st_ptr(O0_mtype, Address(O4_argbase, 0 * Interpreter::stackElementSize));
// FIXME: assert that O3_adapter is of the right method-type.
__ mov(O3_adapter, G3_method_handle);
trace_method_handle(_masm, "invokeGeneric");
__ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
__ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
__ mov(O0_mtype, G5_method_type); // required by throw_WrongMethodType
// mov(G3_method_handle, G3_method_handle); // already in this register
__ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
__ delayed()->nop();
return entry_point;
}
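
For orientation, the two entry paths wired up here correspond to what eventually shipped as MethodHandle.invokeExact and plain invoke (named invokeGeneric at the time of this change). A minimal sketch against the final java.lang.invoke API:

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class InvokeKinds {
    public static void main(String[] args) throws Throwable {
        MethodHandle max = MethodHandles.lookup().findStatic(
            Math.class, "max", MethodType.methodType(int.class, int.class, int.class));
        int exact = (int) max.invokeExact(1, 2); // call-site type must match the handle exactly
        Object generic = max.invoke(1, 2);       // the invokeGeneric path adapts arguments/return on the fly
        System.out.println(exact + " " + generic);
    }
}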
@ -630,10 +693,16 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
switch (ek) {
case _adapter_opt_i2i:
case _adapter_opt_l2i:
__ unimplemented(entry_name(ek));
value = vmarg;
break;
case _adapter_opt_l2i:
{
// just delete the extra slot
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
value = vmarg = Address(O0_argslot, 0);
}
break;
case _adapter_opt_unboxi:
{
// Load the value up from the heap.

View File

@ -1843,6 +1843,12 @@ bool Matcher::is_spillable_arg( int reg ) {
return can_be_java_arg(reg);
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// Use the hardware SDIVX instruction when it is
// faster than code that uses multiply.
return VM_Version::has_fast_idiv();
}
// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
ShouldNotReachHere();
@ -9510,16 +9516,16 @@ instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
Register Rdst = $dst$$Register;
Register Rsrc = $src$$Register;
Register Rtmp = $tmp$$Register;
__ srl(Rsrc, 1, Rtmp);
__ srl(Rsrc, 0, Rdst);
__ srl(Rsrc, 1, Rtmp);
__ srl(Rsrc, 0, Rdst);
__ or3(Rdst, Rtmp, Rdst);
__ srl(Rdst, 2, Rtmp);
__ srl(Rdst, 2, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srl(Rdst, 4, Rtmp);
__ srl(Rdst, 4, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srl(Rdst, 8, Rtmp);
__ srl(Rdst, 8, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srl(Rdst, 16, Rtmp);
__ srl(Rdst, 16, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ popc(Rdst, Rdst);
__ mov(BitsPerInt, Rtmp);
@ -9528,7 +9534,7 @@ instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
ins_pipe(ialu_reg);
%}
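
The sequence smears the highest set bit into every bit position below it and then counts ones with POPC, so no branches or lookup tables are needed. A hedged Java model of the same computation:

static int countLeadingZeros32(int x) {
    x |= (x >>>  1);  // smear the highest set bit rightward...
    x |= (x >>>  2);
    x |= (x >>>  4);
    x |= (x >>>  8);
    x |= (x >>> 16);  // ...now every bit at or below it is set
    return 32 - Integer.bitCount(x);  // BitsPerInt - POPC; yields 32 for x == 0
}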
instruct countLeadingZerosL(iRegI dst, iRegL src, iRegL tmp, flagsReg cr) %{
instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
match(Set dst (CountLeadingZerosL src));
effect(TEMP dst, TEMP tmp, KILL cr);
@ -9559,18 +9565,18 @@ instruct countLeadingZerosL(iRegI dst, iRegL src, iRegL tmp, flagsReg cr) %{
Register Rdst = $dst$$Register;
Register Rsrc = $src$$Register;
Register Rtmp = $tmp$$Register;
__ srlx(Rsrc, 1, Rtmp);
__ or3(Rsrc, Rtmp, Rdst);
__ srlx(Rdst, 2, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srlx(Rdst, 4, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srlx(Rdst, 8, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srlx(Rdst, 16, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srlx(Rdst, 32, Rtmp);
__ or3(Rdst, Rtmp, Rdst);
__ srlx(Rsrc, 1, Rtmp);
__ or3( Rsrc, Rtmp, Rdst);
__ srlx(Rdst, 2, Rtmp);
__ or3( Rdst, Rtmp, Rdst);
__ srlx(Rdst, 4, Rtmp);
__ or3( Rdst, Rtmp, Rdst);
__ srlx(Rdst, 8, Rtmp);
__ or3( Rdst, Rtmp, Rdst);
__ srlx(Rdst, 16, Rtmp);
__ or3( Rdst, Rtmp, Rdst);
__ srlx(Rdst, 32, Rtmp);
__ or3( Rdst, Rtmp, Rdst);
__ popc(Rdst, Rdst);
__ mov(BitsPerLong, Rtmp);
__ sub(Rtmp, Rdst, Rdst);

View File

@ -341,6 +341,26 @@ void TemplateTable::fast_aldc(bool wide) {
resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));
__ verify_oop(Otos_i);
Label L_done;
const Register Rcon_klass = G3_scratch; // same as Rcache
const Register Rarray_klass = G4_scratch; // same as Rscratch
__ load_klass(Otos_i, Rcon_klass);
AddressLiteral array_klass_addr((address)Universe::systemObjArrayKlassObj_addr());
__ load_contents(array_klass_addr, Rarray_klass);
__ cmp(Rarray_klass, Rcon_klass);
__ brx(Assembler::notEqual, false, Assembler::pt, L_done);
__ delayed()->nop();
__ ld(Address(Otos_i, arrayOopDesc::length_offset_in_bytes()), Rcon_klass);
__ tst(Rcon_klass);
__ brx(Assembler::zero, true, Assembler::pt, L_done);
__ delayed()->clr(Otos_i); // executed only if branch is taken
// Load the exception from the system-array which wraps it:
__ load_heap_oop(Otos_i, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
__ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
__ bind(L_done);
}
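
In Java terms, the guard emitted above (and mirrored in the two x86 templates later in this commit) behaves roughly like the hedged sketch below, where systemObjArrayKlass stands in for Universe::systemObjArrayKlassObj():

static Object filterCachedConstant(Object v, Class<?> systemObjArrayKlass) throws Throwable {
    if (v != null && v.getClass() == systemObjArrayKlass) {
        Object[] wrapper = (Object[]) v;
        if (wrapper.length == 0) return null; // the annulled-branch path: push null instead
        throw (Throwable) wrapper[0];         // the throw_exception_entry path: rethrow element 0
    }
    return v;                                 // ordinary resolved constant
}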
void TemplateTable::ldc2_w() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -80,7 +80,8 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
}
if (is_niagara1_plus()) {
if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
if (has_blk_init() && AllocatePrefetchStyle > 0 &&
FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
// Use BIS instruction for allocation prefetch.
FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
@ -118,16 +119,18 @@ void VM_Version::initialize() {
#endif
char buf[512];
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s",
jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
(has_v8() ? ", has_v8" : ""),
(has_v9() ? ", has_v9" : ""),
(has_hardware_popc() ? ", popc" : ""),
(has_vis1() ? ", has_vis1" : ""),
(has_vis2() ? ", has_vis2" : ""),
(has_blk_init() ? ", has_blk_init" : ""),
(is_ultra3() ? ", is_ultra3" : ""),
(is_sun4v() ? ", is_sun4v" : ""),
(is_niagara1() ? ", is_niagara1" : ""),
(is_niagara1_plus() ? ", is_niagara1_plus" : ""),
(is_sparc64() ? ", is_sparc64" : ""),
(!has_hardware_mul32() ? ", no-mul32" : ""),
(!has_hardware_div32() ? ", no-div32" : ""),
(!has_hardware_fsmuld() ? ", no-fsmuld" : ""));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,9 @@ protected:
v9_instructions = 5,
vis1_instructions = 6,
vis2_instructions = 7,
sun4v_instructions = 8
sun4v_instructions = 8,
blk_init_instructions = 9,
fmaf_instructions = 10
};
enum Feature_Flag_Set {
@ -49,6 +51,8 @@ protected:
vis1_instructions_m = 1 << vis1_instructions,
vis2_instructions_m = 1 << vis2_instructions,
sun4v_m = 1 << sun4v_instructions,
blk_init_instructions_m = 1 << blk_init_instructions,
fmaf_instructions_m = 1 << fmaf_instructions,
generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
generic_v9_m = generic_v8_m | v9_instructions_m,
@ -67,6 +71,7 @@ protected:
static int platform_features(int features);
static bool is_niagara1(int features) { return (features & sun4v_m) != 0; }
static bool is_sparc64(int features) { return (features & fmaf_instructions_m) != 0; }
static int maximum_niagara1_processor_count() { return 32; }
// Returns true if the platform is in the niagara line and
@ -86,6 +91,7 @@ public:
static bool has_hardware_popc() { return (_features & hardware_popc_m) != 0; }
static bool has_vis1() { return (_features & vis1_instructions_m) != 0; }
static bool has_vis2() { return (_features & vis2_instructions_m) != 0; }
static bool has_blk_init() { return (_features & blk_init_instructions_m) != 0; }
static bool supports_compare_and_exchange()
{ return has_v9(); }
@ -93,8 +99,10 @@ public:
static bool is_ultra3() { return (_features & ultra3_m) == ultra3_m; }
static bool is_sun4v() { return (_features & sun4v_m) != 0; }
static bool is_niagara1() { return is_niagara1(_features); }
static bool is_sparc64() { return is_sparc64(_features); }
static bool has_fast_fxtof() { return has_v9() && !is_ultra3(); }
static bool has_fast_idiv() { return is_niagara1_plus() || is_sparc64(); }
static const char* cpu_features() { return _features_str; }

View File

@ -1275,6 +1275,12 @@ void Assembler::idivl(Register src) {
emit_byte(0xF8 | encode);
}
void Assembler::divl(Register src) { // Unsigned
int encode = prefix_and_encode(src->encoding());
emit_byte(0xF7);
emit_byte(0xF0 | encode);
}
void Assembler::imull(Register dst, Register src) {
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
@ -1288,7 +1294,7 @@ void Assembler::imull(Register dst, Register src, int value) {
if (is8bit(value)) {
emit_byte(0x6B);
emit_byte(0xC0 | encode);
emit_byte(value);
emit_byte(value & 0xFF);
} else {
emit_byte(0x69);
emit_byte(0xC0 | encode);
@ -3903,7 +3909,7 @@ void Assembler::imulq(Register dst, Register src, int value) {
if (is8bit(value)) {
emit_byte(0x6B);
emit_byte(0xC0 | encode);
emit_byte(value);
emit_byte(value & 0xFF);
} else {
emit_byte(0x69);
emit_byte(0xC0 | encode);

View File

@ -1011,6 +1011,7 @@ private:
void hlt();
void idivl(Register src);
void divl(Register src); // Unsigned division
void idivq(Register src);

View File

@ -499,7 +499,7 @@ void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
Register new_val_reg = new_val()->as_register();
__ cmpptr(new_val_reg, (int32_t) NULL_WORD);
__ jcc(Assembler::equal, _continuation);
ce->store_parameter(addr()->as_register(), 0);
ce->store_parameter(addr()->as_pointer_register(), 0);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
__ jmp(_continuation);
}

View File

@ -765,7 +765,7 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
ShouldNotReachHere();
}
LIR_Opr addr = (type == objectType) ? new_register(T_OBJECT) : new_pointer_register();
LIR_Opr addr = new_pointer_register();
LIR_Address* a;
if(offset.result()->is_constant()) {
a = new LIR_Address(obj.result(),

View File

@ -63,3 +63,5 @@ define_pd_global(intx, PreInflateSpin, 10);
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);

View File

@ -399,6 +399,23 @@ void TemplateTable::fast_aldc(bool wide) {
if (VerifyOops) {
__ verify_oop(rax);
}
Label L_done, L_throw_exception;
const Register con_klass_temp = rcx; // same as Rcache
__ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
__ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
__ jcc(Assembler::notEqual, L_done);
__ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
__ jcc(Assembler::notEqual, L_throw_exception);
__ xorptr(rax, rax);
__ jmp(L_done);
// Load the exception from the system-array which wraps it:
__ bind(L_throw_exception);
__ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ jump(ExternalAddress(Interpreter::throw_exception_entry()));
__ bind(L_done);
}
void TemplateTable::ldc2_w() {

View File

@ -413,6 +413,25 @@ void TemplateTable::fast_aldc(bool wide) {
if (VerifyOops) {
__ verify_oop(rax);
}
Label L_done, L_throw_exception;
const Register con_klass_temp = rcx; // same as cache
const Register array_klass_temp = rdx; // same as index
__ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
__ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
__ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
__ jcc(Assembler::notEqual, L_done);
__ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
__ jcc(Assembler::notEqual, L_throw_exception);
__ xorptr(rax, rax);
__ jmp(L_done);
// Load the exception from the system-array which wraps it:
__ bind(L_throw_exception);
__ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ jump(ExternalAddress(Interpreter::throw_exception_entry()));
__ bind(L_done);
}
void TemplateTable::ldc2_w() {

View File

@ -446,6 +446,10 @@ public:
static bool supports_lzcnt() { return (_cpuFeatures & CPU_LZCNT) != 0; }
static bool supports_sse4a() { return (_cpuFeatures & CPU_SSE4A) != 0; }
// Intel Core and newer CPUs have a fast IDIV instruction (excluding Atom).
static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 &&
supports_sse3() && _model != 0x1C; }
static bool supports_compare_and_exchange() { return true; }
static const char* cpu_features() { return _features_str; }

View File

@ -1508,6 +1508,16 @@ bool Matcher::is_spillable_arg( int reg ) {
return can_be_java_arg(reg);
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// Use the hardware integer DIV instruction when
// it is faster than code that uses multiply.
// Only when the constant divisor fits into 32 bits
// (min_jint is excluded because negating it does not
// yield a correct positive 32-bit value).
return VM_Version::has_fast_idiv() &&
(divisor == (int)divisor && divisor != min_jint);
}
// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
return EAX_REG_mask;
@ -1546,6 +1556,9 @@ bool is_operand_hi32_zero(Node* n) {
return true;
}
}
if (opc == Op_ConL && (n->get_long() & 0xFFFFFFFF00000000LL) == 0LL) {
return true;
}
return false;
}
@ -2309,9 +2322,11 @@ encode %{
enc_class move_long_big_shift_sign( eRegL dst, immI_32_63 cnt ) %{
emit_opcode( cbuf, 0x8B ); // Move
emit_rm(cbuf, 0x3, $dst$$reg, HIGH_FROM_LOW($dst$$reg));
emit_d8(cbuf,$primary);
emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
emit_d8(cbuf,$cnt$$constant-32);
if( $cnt$$constant > 32 ) { // Shift, if not by zero
emit_d8(cbuf,$primary);
emit_rm(cbuf, 0x3, $secondary, $dst$$reg);
emit_d8(cbuf,$cnt$$constant-32);
}
emit_d8(cbuf,$primary);
emit_rm(cbuf, 0x3, $secondary, HIGH_FROM_LOW($dst$$reg));
emit_d8(cbuf,31);
@ -8842,6 +8857,144 @@ instruct modL_eReg( eADXRegL dst, eRegL src1, eRegL src2, eFlagsReg cr, eCXRegI
ins_pipe( pipe_slow );
%}
// Divide Register Long (no special case since divisor != -1)
instruct divL_eReg_imm32( eADXRegL dst, immL32 imm, eRegI tmp, eRegI tmp2, eFlagsReg cr ) %{
match(Set dst (DivL dst imm));
effect( TEMP tmp, TEMP tmp2, KILL cr );
ins_cost(1000);
format %{ "MOV $tmp,abs($imm) # ldiv EDX:EAX,$imm\n\t"
"XOR $tmp2,$tmp2\n\t"
"CMP $tmp,EDX\n\t"
"JA,s fast\n\t"
"MOV $tmp2,EAX\n\t"
"MOV EAX,EDX\n\t"
"MOV EDX,0\n\t"
"JLE,s pos\n\t"
"LNEG EAX : $tmp2\n\t"
"DIV $tmp # unsigned division\n\t"
"XCHG EAX,$tmp2\n\t"
"DIV $tmp\n\t"
"LNEG $tmp2 : EAX\n\t"
"JMP,s done\n"
"pos:\n\t"
"DIV $tmp\n\t"
"XCHG EAX,$tmp2\n"
"fast:\n\t"
"DIV $tmp\n"
"done:\n\t"
"MOV EDX,$tmp2\n\t"
"NEG EDX:EAX # if $imm < 0" %}
ins_encode %{
int con = (int)$imm$$constant;
assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
int pcon = (con > 0) ? con : -con;
Label Lfast, Lpos, Ldone;
__ movl($tmp$$Register, pcon);
__ xorl($tmp2$$Register,$tmp2$$Register);
__ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
__ jccb(Assembler::above, Lfast); // result fits into 32 bit
__ movl($tmp2$$Register, $dst$$Register); // save
__ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
__ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
__ jccb(Assembler::lessEqual, Lpos); // result is positive
// Negative dividend.
// convert value to positive to use unsigned division
__ lneg($dst$$Register, $tmp2$$Register);
__ divl($tmp$$Register);
__ xchgl($dst$$Register, $tmp2$$Register);
__ divl($tmp$$Register);
// revert result back to negative
__ lneg($tmp2$$Register, $dst$$Register);
__ jmpb(Ldone);
__ bind(Lpos);
__ divl($tmp$$Register); // Use unsigned division
__ xchgl($dst$$Register, $tmp2$$Register);
// Fall through to the final divide; tmp2 has the 32-bit hi result
__ bind(Lfast);
// fast path: src is positive
__ divl($tmp$$Register); // Use unsigned division
__ bind(Ldone);
__ movl(HIGH_FROM_LOW($dst$$Register),$tmp2$$Register);
if (con < 0) {
__ lneg(HIGH_FROM_LOW($dst$$Register), $dst$$Register);
}
%}
ins_pipe( pipe_slow );
%}
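
The control flow above reduces a signed 64-bit divide by a 32-bit constant to at most two unsigned 32-bit DIV instructions. A hedged Java model of the arithmetic (Java operators stand in for the DIV/LNEG sequences; the path split differs slightly because the model negates before choosing a path):

static long divByConst32(long dividend, int con) {
    // con != 0, != -1, != min_jint, as asserted in the encoder above
    long d = (con > 0 ? con : -con) & 0xFFFFFFFFL;   // pcon: |divisor| as unsigned 32-bit
    long x = dividend < 0 ? -dividend : dividend;    // LNEG: divide magnitudes, fix sign last
    long hi = x >>> 32, lo = x & 0xFFFFFFFFL;
    long q;
    if (Long.compareUnsigned(d, hi) > 0) {           // fast path: quotient fits in 32 bits
        q = Long.divideUnsigned(x, d);               //   a single DIV of hi:lo by d
    } else {
        long qHi = hi / d;                           // first DIV: high-word quotient...
        long rem = hi % d;                           //   ...and remainder (DIV yields both)
        long qLo = Long.divideUnsigned((rem << 32) | lo, d); // second DIV: rem:lo
        q = (qHi << 32) | qLo;
    }
    return (dividend < 0) != (con < 0) ? -q : q;     // final NEG EDX:EAX when signs differ
}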
// Remainder Register Long (remainder fits into 32 bits)
instruct modL_eReg_imm32( eADXRegL dst, immL32 imm, eRegI tmp, eRegI tmp2, eFlagsReg cr ) %{
match(Set dst (ModL dst imm));
effect( TEMP tmp, TEMP tmp2, KILL cr );
ins_cost(1000);
format %{ "MOV $tmp,abs($imm) # lrem EDX:EAX,$imm\n\t"
"CMP $tmp,EDX\n\t"
"JA,s fast\n\t"
"MOV $tmp2,EAX\n\t"
"MOV EAX,EDX\n\t"
"MOV EDX,0\n\t"
"JLE,s pos\n\t"
"LNEG EAX : $tmp2\n\t"
"DIV $tmp # unsigned division\n\t"
"MOV EAX,$tmp2\n\t"
"DIV $tmp\n\t"
"NEG EDX\n\t"
"JMP,s done\n"
"pos:\n\t"
"DIV $tmp\n\t"
"MOV EAX,$tmp2\n"
"fast:\n\t"
"DIV $tmp\n"
"done:\n\t"
"MOV EAX,EDX\n\t"
"SAR EDX,31\n\t" %}
ins_encode %{
int con = (int)$imm$$constant;
assert(con != 0 && con != -1 && con != min_jint, "wrong divisor");
int pcon = (con > 0) ? con : -con;
Label Lfast, Lpos, Ldone;
__ movl($tmp$$Register, pcon);
__ cmpl($tmp$$Register, HIGH_FROM_LOW($dst$$Register));
__ jccb(Assembler::above, Lfast); // src is positive and result fits into 32 bit
__ movl($tmp2$$Register, $dst$$Register); // save
__ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
__ movl(HIGH_FROM_LOW($dst$$Register),0); // preserve flags
__ jccb(Assembler::lessEqual, Lpos); // result is positive
// Negative dividend.
// convert value to positive to use unsigned division
__ lneg($dst$$Register, $tmp2$$Register);
__ divl($tmp$$Register);
__ movl($dst$$Register, $tmp2$$Register);
__ divl($tmp$$Register);
// revert remainder back to negative
__ negl(HIGH_FROM_LOW($dst$$Register));
__ jmpb(Ldone);
__ bind(Lpos);
__ divl($tmp$$Register);
__ movl($dst$$Register, $tmp2$$Register);
__ bind(Lfast);
// fast path: src is positive
__ divl($tmp$$Register);
__ bind(Ldone);
__ movl($dst$$Register, HIGH_FROM_LOW($dst$$Register));
__ sarl(HIGH_FROM_LOW($dst$$Register), 31); // result sign
%}
ins_pipe( pipe_slow );
%}
// Integer Shift Instructions
// Shift Left by one
instruct shlI_eReg_1(eRegI dst, immI1 shift, eFlagsReg cr) %{

View File

@ -2065,6 +2065,13 @@ bool Matcher::is_spillable_arg(int reg)
return can_be_java_arg(reg);
}
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
// In 64-bit mode, code that uses multiply when the
// divisor is constant is faster than the hardware
// DIV instruction (it uses MulHiL).
return false;
}
// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
return INT_RAX_REG_mask;

View File

@ -45,3 +45,5 @@ define_pd_global(intx, StackShadowPages, 5 LP64_ONLY(+1) DEBUG_ONLY(+3));
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);
define_pd_global(bool, UseMembar, false);

View File

@ -176,10 +176,10 @@ int LinuxAttachListener::init() {
int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
os::get_temp_directory(), os::current_process_id());
if (n <= (int)UNIX_PATH_MAX) {
if (n < (int)UNIX_PATH_MAX) {
n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
}
if (n > (int)UNIX_PATH_MAX) {
if (n >= (int)UNIX_PATH_MAX) {
return -1;
}
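
(C99 snprintf returns the length the formatted string would have had, excluding the terminating NUL, so a return value equal to UNIX_PATH_MAX already means the path was truncated; hence the corrected strict comparisons.)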

View File

@ -1,24 +0,0 @@
/*
* Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

View File

@ -1,23 +0,0 @@
/*
* Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -827,8 +827,10 @@ bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
switch (thr_type) {
case os::java_thread:
// Java threads use ThreadStackSize which default value can be changed with the flag -Xss
if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
// Java threads use ThreadStackSize which default value can be
// changed with the flag -Xss
assert (JavaThread::stack_size_at_create() > 0, "this should be set");
stack_size = JavaThread::stack_size_at_create();
break;
case os::compiler_thread:
if (CompilerThreadStackSize > 0) {
@ -3922,12 +3924,21 @@ jint os::init_2(void)
Linux::signal_sets_init();
Linux::install_signal_handlers();
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in 2*BytesPerWord times page size to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
(size_t)(StackYellowPages+StackRedPages+StackShadowPages+
2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < Linux::min_stack_allowed) {
threadStackSizeInBytes < os::Linux::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least %dk",
Linux::min_stack_allowed / K);
os::Linux::min_stack_allowed/ K);
return JNI_ERR;
}
@ -4839,7 +4850,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// Next, demultiplex/decode time arguments
timespec absTime;
if (time < 0) { // don't wait at all
if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
return;
}
if (time > 0) {
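
A hedged Java model of the decode rule this fix establishes (the Solaris and Windows ports below get the same treatment): absolute time is a millisecond deadline measured from the epoch and relative time is in nanoseconds, so an absolute deadline of 0 has always already passed.

static boolean parkReturnsImmediately(boolean isAbsolute, long time) {
    return time < 0                   // negative wait: never meaningful
        || (isAbsolute && time == 0); // deadline at the epoch: already passed
}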

View File

@ -1,23 +0,0 @@
/*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

View File

@ -1,25 +0,0 @@
/*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
private:

View File

@ -1,23 +0,0 @@
/*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -4878,18 +4878,17 @@ jint os::init_2(void) {
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in BytesPerWord times page size to account for VM stack during
// Add in 2*BytesPerWord times page size to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
guarantee((Solaris::min_stack_allowed >=
(StackYellowPages+StackRedPages+StackShadowPages+BytesPerWord
COMPILER2_PRESENT(+1)) * page_size),
"need to increase Solaris::min_stack_allowed on this platform");
os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
(size_t)(StackYellowPages+StackRedPages+StackShadowPages+
2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);
size_t threadStackSizeInBytes = ThreadStackSize * K;
if (threadStackSizeInBytes != 0 &&
threadStackSizeInBytes < Solaris::min_stack_allowed) {
threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
Solaris::min_stack_allowed/K);
os::Solaris::min_stack_allowed/K);
return JNI_ERR;
}
@ -5837,7 +5836,7 @@ void Parker::park(bool isAbsolute, jlong time) {
// First, demultiplex/decode time arguments
timespec absTime;
if (time < 0) { // don't wait at all
if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
return;
}
if (time > 0) {

View File

@ -1,25 +0,0 @@
/*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "incls/_precompiled.incl"

View File

@ -1,25 +0,0 @@
/*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
private:

View File

@ -1,23 +0,0 @@
/*
* Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

View File

@ -3311,7 +3311,6 @@ extern "C" {
}
}
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
// Allocate a single page and mark it as readable for safepoint polling
@ -3390,6 +3389,21 @@ jint os::init_2(void) {
actual_reserve_size = default_reserve_size;
}
// Check minimum allowable stack size for thread creation and to initialize
// the java system classes, including StackOverflowError - depends on page
// size. Add a page for compiler2 recursion in main thread.
// Add in 2*BytesPerWord times page size to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
size_t min_stack_allowed =
(size_t)(StackYellowPages+StackRedPages+StackShadowPages+
2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
if (actual_reserve_size < min_stack_allowed) {
tty->print_cr("\nThe stack size specified is too small, "
"Specify at least %dk",
min_stack_allowed / K);
return JNI_ERR;
}
JavaThread::set_stack_size_at_create(stack_commit_size);
// Calculate the theoretical max. size of Threads to guard against artificial
@ -3992,7 +4006,7 @@ void Parker::park(bool isAbsolute, jlong time) {
if (time < 0) { // don't wait
return;
}
else if (time == 0) {
else if (time == 0 && !isAbsolute) {
time = INFINITE;
}
else if (isAbsolute) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -200,6 +200,18 @@ void os::print_context(outputStream *st, void *context) {
sigcontext* sc = (sigcontext*)context;
st->print_cr("Registers:");
st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT
" G3=" INTPTR_FORMAT " G4=" INTPTR_FORMAT,
SIG_REGS(sc).u_regs[CON_G1],
SIG_REGS(sc).u_regs[CON_G2],
SIG_REGS(sc).u_regs[CON_G3],
SIG_REGS(sc).u_regs[CON_G4]);
st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT
" G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT,
SIG_REGS(sc).u_regs[CON_G5],
SIG_REGS(sc).u_regs[CON_G6],
SIG_REGS(sc).u_regs[CON_G7],
SIG_REGS(sc).y);
st->print_cr(" O0=" INTPTR_FORMAT " O1=" INTPTR_FORMAT
" O2=" INTPTR_FORMAT " O3=" INTPTR_FORMAT,
SIG_REGS(sc).u_regs[CON_O0],
@ -213,18 +225,32 @@ void os::print_context(outputStream *st, void *context) {
SIG_REGS(sc).u_regs[CON_O6],
SIG_REGS(sc).u_regs[CON_O7]);
st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT
" G3=" INTPTR_FORMAT " G4=" INTPTR_FORMAT,
SIG_REGS(sc).u_regs[CON_G1],
SIG_REGS(sc).u_regs[CON_G2],
SIG_REGS(sc).u_regs[CON_G3],
SIG_REGS(sc).u_regs[CON_G4]);
st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT
" G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT,
SIG_REGS(sc).u_regs[CON_G5],
SIG_REGS(sc).u_regs[CON_G6],
SIG_REGS(sc).u_regs[CON_G7],
SIG_REGS(sc).y);
intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
st->print_cr(" L0=" INTPTR_FORMAT " L1=" INTPTR_FORMAT
" L2=" INTPTR_FORMAT " L3=" INTPTR_FORMAT,
sp[L0->sp_offset_in_saved_window()],
sp[L1->sp_offset_in_saved_window()],
sp[L2->sp_offset_in_saved_window()],
sp[L3->sp_offset_in_saved_window()]);
st->print_cr(" L4=" INTPTR_FORMAT " L5=" INTPTR_FORMAT
" L6=" INTPTR_FORMAT " L7=" INTPTR_FORMAT,
sp[L4->sp_offset_in_saved_window()],
sp[L5->sp_offset_in_saved_window()],
sp[L6->sp_offset_in_saved_window()],
sp[L7->sp_offset_in_saved_window()]);
st->print_cr(" I0=" INTPTR_FORMAT " I1=" INTPTR_FORMAT
" I2=" INTPTR_FORMAT " I3=" INTPTR_FORMAT,
sp[I0->sp_offset_in_saved_window()],
sp[I1->sp_offset_in_saved_window()],
sp[I2->sp_offset_in_saved_window()],
sp[I3->sp_offset_in_saved_window()]);
st->print_cr(" I4=" INTPTR_FORMAT " I5=" INTPTR_FORMAT
" I6=" INTPTR_FORMAT " I7=" INTPTR_FORMAT,
sp[I4->sp_offset_in_saved_window()],
sp[I5->sp_offset_in_saved_window()],
sp[I6->sp_offset_in_saved_window()],
sp[I7->sp_offset_in_saved_window()]);
st->print_cr(" PC=" INTPTR_FORMAT " nPC=" INTPTR_FORMAT,
SIG_PC(sc),
@ -232,7 +258,6 @@ void os::print_context(outputStream *st, void *context) {
st->cr();
st->cr();
intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
st->cr();
@ -242,7 +267,58 @@ void os::print_context(outputStream *st, void *context) {
// this at the end, and hope for the best.
address pc = os::Linux::ucontext_get_pc(uc);
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t *uc = (ucontext_t*)context;
intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print("G1="); print_location(st, SIG_REGS(sc).u_regs[CON__G1]);
st->print("G2="); print_location(st, SIG_REGS(sc).u_regs[CON__G2]);
st->print("G3="); print_location(st, SIG_REGS(sc).u_regs[CON__G3]);
st->print("G4="); print_location(st, SIG_REGS(sc).u_regs[CON__G4]);
st->print("G5="); print_location(st, SIG_REGS(sc).u_regs[CON__G5]);
st->print("G6="); print_location(st, SIG_REGS(sc).u_regs[CON__G6]);
st->print("G7="); print_location(st, SIG_REGS(sc).u_regs[CON__G7]);
st->cr();
st->print("O0="); print_location(st, SIG_REGS(sc).u_regs[CON__O0]);
st->print("O1="); print_location(st, SIG_REGS(sc).u_regs[CON__O1]);
st->print("O2="); print_location(st, SIG_REGS(sc).u_regs[CON__O2]);
st->print("O3="); print_location(st, SIG_REGS(sc).u_regs[CON__O3]);
st->print("O4="); print_location(st, SIG_REGS(sc).u_regs[CON__O4]);
st->print("O5="); print_location(st, SIG_REGS(sc).u_regs[CON__O5]);
st->print("O6="); print_location(st, SIG_REGS(sc).u_regs[CON__O6]);
st->print("O7="); print_location(st, SIG_REGS(sc).u_regs[CON__O7]);
st->cr();
st->print("L0="); print_location(st, sp[L0->sp_offset_in_saved_window()]);
st->print("L1="); print_location(st, sp[L1->sp_offset_in_saved_window()]);
st->print("L2="); print_location(st, sp[L2->sp_offset_in_saved_window()]);
st->print("L3="); print_location(st, sp[L3->sp_offset_in_saved_window()]);
st->print("L4="); print_location(st, sp[L4->sp_offset_in_saved_window()]);
st->print("L5="); print_location(st, sp[L5->sp_offset_in_saved_window()]);
st->print("L6="); print_location(st, sp[L6->sp_offset_in_saved_window()]);
st->print("L7="); print_location(st, sp[L7->sp_offset_in_saved_window()]);
st->cr();
st->print("I0="); print_location(st, sp[I0->sp_offset_in_saved_window()]);
st->print("I1="); print_location(st, sp[I1->sp_offset_in_saved_window()]);
st->print("I2="); print_location(st, sp[I2->sp_offset_in_saved_window()]);
st->print("I3="); print_location(st, sp[I3->sp_offset_in_saved_window()]);
st->print("I4="); print_location(st, sp[I4->sp_offset_in_saved_window()]);
st->print("I5="); print_location(st, sp[I5->sp_offset_in_saved_window()]);
st->print("I6="); print_location(st, sp[I6->sp_offset_in_saved_window()]);
st->print("I7="); print_location(st, sp[I7->sp_offset_in_saved_window()]);
st->cr();
}
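
Editor's note on the new os::print_register_info functions this commit introduces: each row prints a register name and then hands the raw value to print_location, which describes what the value points into (heap, stack, code space). A minimal standalone sketch of that row pattern, with a stub print_location standing in for the real VM routine (illustrative only, not HotSpot code):

#include <cinttypes>
#include <cstdio>

// Stand-in for the VM's print_location: the real routine inspects the
// value and reports whether it falls in a known heap, stack or code region.
static void print_location(intptr_t v) {
  printf("0x%016" PRIxPTR " <details elided in this model>\n", (uintptr_t)v);
}

int main() {
  const char* names[] = { "G1", "G2", "G3" };
  intptr_t    regs[]  = { 0x1000, 0x2000, 0x3000 };
  for (int i = 0; i < 3; i++) {
    printf("%s=", names[i]);   // one register per line: NAME= then location info
    print_location(regs[i]);
  }
  return 0;
}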

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -718,11 +718,6 @@ void os::print_context(outputStream *st, void *context) {
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Registers:");
// this is horrendously verbose but the layout of the registers in the
// context does not match how we defined our abstract Register set, so
// we can't just iterate through the gregs area
#ifdef AMD64
st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
@ -745,68 +740,11 @@ void os::print_context(outputStream *st, void *context) {
st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
st->cr();
st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
st->print(", EFL=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
st->cr();
st->print(" TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
st->cr();
st->print_cr("RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
st->cr();
st->print_cr("RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
st->cr();
st->print_cr("RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
st->cr();
st->print_cr("RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
st->cr();
st->print_cr("RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
st->cr();
st->print_cr("RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
st->cr();
st->print_cr("RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
st->cr();
st->print_cr("R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
print_location(st, uc->uc_mcontext.gregs[REG_R8]);
st->cr();
st->print_cr("R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
print_location(st, uc->uc_mcontext.gregs[REG_R9]);
st->cr();
st->print_cr("R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
print_location(st, uc->uc_mcontext.gregs[REG_R10]);
st->cr();
st->print_cr("R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
print_location(st, uc->uc_mcontext.gregs[REG_R11]);
st->cr();
st->print_cr("R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
print_location(st, uc->uc_mcontext.gregs[REG_R12]);
st->cr();
st->print_cr("R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
print_location(st, uc->uc_mcontext.gregs[REG_R13]);
st->cr();
st->print_cr("R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
print_location(st, uc->uc_mcontext.gregs[REG_R14]);
st->cr();
st->print_cr("R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
@ -819,41 +757,8 @@ void os::print_context(outputStream *st, void *context) {
st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
st->cr();
st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
st->cr();
st->print_cr("EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
st->cr();
st->print_cr("ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
st->cr();
st->print_cr("EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
st->cr();
st->print_cr("ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESP]);
print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
st->cr();
st->print_cr("EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
st->cr();
st->print_cr("ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
st->cr();
st->print_cr("EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
#endif // AMD64
st->cr();
st->cr();
@ -868,7 +773,52 @@ void os::print_context(outputStream *st, void *context) {
// this at the end, and hope for the best.
address pc = os::Linux::ucontext_get_pc(uc);
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Register to memory mapping:");
st->cr();
// this is horrendously verbose but the layout of the registers in the
// context does not match how we defined our abstract Register set, so
// we can't just iterate through the gregs area
// this is only for the "general purpose" registers
#ifdef AMD64
st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
#endif // AMD64
st->cr();
}
void os::setup_fpu() {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -540,6 +540,11 @@ int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_
pc = (address) uc->uc_mcontext.gregs[REG_PC];
}
// Sometimes the register windows are not properly flushed.
if(uc->uc_mcontext.gwins != NULL) {
::handle_unflushed_register_windows(uc->uc_mcontext.gwins);
}
// unmask current signal
sigset_t newset;
sigemptyset(&newset);
@ -558,6 +563,18 @@ void os::print_context(outputStream *st, void *context) {
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Registers:");
st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT
" G3=" INTPTR_FORMAT " G4=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_G1],
uc->uc_mcontext.gregs[REG_G2],
uc->uc_mcontext.gregs[REG_G3],
uc->uc_mcontext.gregs[REG_G4]);
st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT
" G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_G5],
uc->uc_mcontext.gregs[REG_G6],
uc->uc_mcontext.gregs[REG_G7],
uc->uc_mcontext.gregs[REG_Y]);
st->print_cr(" O0=" INTPTR_FORMAT " O1=" INTPTR_FORMAT
" O2=" INTPTR_FORMAT " O3=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_O0],
@ -571,81 +588,39 @@ void os::print_context(outputStream *st, void *context) {
uc->uc_mcontext.gregs[REG_O6],
uc->uc_mcontext.gregs[REG_O7]);
st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT
" G3=" INTPTR_FORMAT " G4=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_G1],
uc->uc_mcontext.gregs[REG_G2],
uc->uc_mcontext.gregs[REG_G3],
uc->uc_mcontext.gregs[REG_G4]);
st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT
" G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_G5],
uc->uc_mcontext.gregs[REG_G6],
uc->uc_mcontext.gregs[REG_G7],
uc->uc_mcontext.gregs[REG_Y]);
intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
st->print_cr(" L0=" INTPTR_FORMAT " L1=" INTPTR_FORMAT
" L2=" INTPTR_FORMAT " L3=" INTPTR_FORMAT,
sp[L0->sp_offset_in_saved_window()],
sp[L1->sp_offset_in_saved_window()],
sp[L2->sp_offset_in_saved_window()],
sp[L3->sp_offset_in_saved_window()]);
st->print_cr(" L4=" INTPTR_FORMAT " L5=" INTPTR_FORMAT
" L6=" INTPTR_FORMAT " L7=" INTPTR_FORMAT,
sp[L4->sp_offset_in_saved_window()],
sp[L5->sp_offset_in_saved_window()],
sp[L6->sp_offset_in_saved_window()],
sp[L7->sp_offset_in_saved_window()]);
st->print_cr(" I0=" INTPTR_FORMAT " I1=" INTPTR_FORMAT
" I2=" INTPTR_FORMAT " I3=" INTPTR_FORMAT,
sp[I0->sp_offset_in_saved_window()],
sp[I1->sp_offset_in_saved_window()],
sp[I2->sp_offset_in_saved_window()],
sp[I3->sp_offset_in_saved_window()]);
st->print_cr(" I4=" INTPTR_FORMAT " I5=" INTPTR_FORMAT
" I6=" INTPTR_FORMAT " I7=" INTPTR_FORMAT,
sp[I4->sp_offset_in_saved_window()],
sp[I5->sp_offset_in_saved_window()],
sp[I6->sp_offset_in_saved_window()],
sp[I7->sp_offset_in_saved_window()]);
st->print_cr(" PC=" INTPTR_FORMAT " nPC=" INTPTR_FORMAT,
uc->uc_mcontext.gregs[REG_PC],
uc->uc_mcontext.gregs[REG_nPC]);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("O0=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O0]);
print_location(st, uc->uc_mcontext.gregs[REG_O0]);
st->cr();
st->print_cr("O1=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O1]);
print_location(st, uc->uc_mcontext.gregs[REG_O1]);
st->cr();
st->print_cr("O2=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O2]);
print_location(st, uc->uc_mcontext.gregs[REG_O2]);
st->cr();
st->print_cr("O3=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O3]);
print_location(st, uc->uc_mcontext.gregs[REG_O3]);
st->cr();
st->print_cr("O4=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O4]);
print_location(st, uc->uc_mcontext.gregs[REG_O4]);
st->cr();
st->print_cr("O5=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O5]);
print_location(st, uc->uc_mcontext.gregs[REG_O5]);
st->cr();
st->print_cr("O6=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O6]);
print_location(st, uc->uc_mcontext.gregs[REG_O6]);
st->cr();
st->print_cr("O7=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_O7]);
print_location(st, uc->uc_mcontext.gregs[REG_O7]);
st->cr();
st->print_cr("G1=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G1]);
print_location(st, uc->uc_mcontext.gregs[REG_G1]);
st->cr();
st->print_cr("G2=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G2]);
print_location(st, uc->uc_mcontext.gregs[REG_G2]);
st->cr();
st->print_cr("G3=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G3]);
print_location(st, uc->uc_mcontext.gregs[REG_G3]);
st->cr();
st->print_cr("G4=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G4]);
print_location(st, uc->uc_mcontext.gregs[REG_G4]);
st->cr();
st->print_cr("G5=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G5]);
print_location(st, uc->uc_mcontext.gregs[REG_G5]);
st->cr();
st->print_cr("G6=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G6]);
print_location(st, uc->uc_mcontext.gregs[REG_G6]);
st->cr();
st->print_cr("G7=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_G7]);
print_location(st, uc->uc_mcontext.gregs[REG_G7]);
st->cr();
st->cr();
intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
st->cr();
@ -656,7 +631,57 @@ void os::print_context(outputStream *st, void *context) {
ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
address pc = epc.pc();
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t *uc = (ucontext_t*)context;
intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print("G1="); print_location(st, uc->uc_mcontext.gregs[REG_G1]);
st->print("G2="); print_location(st, uc->uc_mcontext.gregs[REG_G2]);
st->print("G3="); print_location(st, uc->uc_mcontext.gregs[REG_G3]);
st->print("G4="); print_location(st, uc->uc_mcontext.gregs[REG_G4]);
st->print("G5="); print_location(st, uc->uc_mcontext.gregs[REG_G5]);
st->print("G6="); print_location(st, uc->uc_mcontext.gregs[REG_G6]);
st->print("G7="); print_location(st, uc->uc_mcontext.gregs[REG_G7]);
st->cr();
st->print("O0="); print_location(st, uc->uc_mcontext.gregs[REG_O0]);
st->print("O1="); print_location(st, uc->uc_mcontext.gregs[REG_O1]);
st->print("O2="); print_location(st, uc->uc_mcontext.gregs[REG_O2]);
st->print("O3="); print_location(st, uc->uc_mcontext.gregs[REG_O3]);
st->print("O4="); print_location(st, uc->uc_mcontext.gregs[REG_O4]);
st->print("O5="); print_location(st, uc->uc_mcontext.gregs[REG_O5]);
st->print("O6="); print_location(st, uc->uc_mcontext.gregs[REG_O6]);
st->print("O7="); print_location(st, uc->uc_mcontext.gregs[REG_O7]);
st->cr();
st->print("L0="); print_location(st, sp[L0->sp_offset_in_saved_window()]);
st->print("L1="); print_location(st, sp[L1->sp_offset_in_saved_window()]);
st->print("L2="); print_location(st, sp[L2->sp_offset_in_saved_window()]);
st->print("L3="); print_location(st, sp[L3->sp_offset_in_saved_window()]);
st->print("L4="); print_location(st, sp[L4->sp_offset_in_saved_window()]);
st->print("L5="); print_location(st, sp[L5->sp_offset_in_saved_window()]);
st->print("L6="); print_location(st, sp[L6->sp_offset_in_saved_window()]);
st->print("L7="); print_location(st, sp[L7->sp_offset_in_saved_window()]);
st->cr();
st->print("I0="); print_location(st, sp[I0->sp_offset_in_saved_window()]);
st->print("I1="); print_location(st, sp[I1->sp_offset_in_saved_window()]);
st->print("I2="); print_location(st, sp[I2->sp_offset_in_saved_window()]);
st->print("I3="); print_location(st, sp[I3->sp_offset_in_saved_window()]);
st->print("I4="); print_location(st, sp[I4->sp_offset_in_saved_window()]);
st->print("I5="); print_location(st, sp[I5->sp_offset_in_saved_window()]);
st->print("I6="); print_location(st, sp[I6->sp_offset_in_saved_window()]);
st->print("I7="); print_location(st, sp[I7->sp_offset_in_saved_window()]);
st->cr();
}
void os::Solaris::init_thread_fpu_state(void) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,10 +65,6 @@ int VM_Version::platform_features(int features) {
// getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
// supported on Solaris 10 and later.
if (os::Solaris::supports_getisax()) {
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) supported.");
#endif
// Check 32-bit architecture.
do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
@ -81,6 +77,11 @@ int VM_Version::platform_features(int features) {
uint_t avn = os::Solaris::getisax(&av, 1);
assert(avn == 1, "should only return one av");
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) returned: " PTR32_FORMAT, av);
#endif
if (av & AV_SPARC_MUL32) features |= hardware_mul32_m;
if (av & AV_SPARC_DIV32) features |= hardware_div32_m;
if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
@ -88,11 +89,22 @@ int VM_Version::platform_features(int features) {
if (av & AV_SPARC_POPC) features |= hardware_popc_m;
if (av & AV_SPARC_VIS) features |= vis1_instructions_m;
if (av & AV_SPARC_VIS2) features |= vis2_instructions_m;
// These values are not defined before Solaris 10,
// but Solaris 8 is still used for jdk6 update builds.
#ifndef AV_SPARC_ASI_BLK_INIT
#define AV_SPARC_ASI_BLK_INIT 0x0080 /* ASI_BLK_INIT_xxx ASI */
#endif
#ifndef AV_SPARC_FMAF
#define AV_SPARC_FMAF 0x0100 /* Sparc64 Fused Multiply-Add */
#endif
if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
if (av & AV_SPARC_FMAF) features |= fmaf_instructions_m;
} else {
// getisax(2) failed, use the old legacy code.
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose)
tty->print_cr("getisax(2) not supported.");
tty->print_cr("getisax(2) is not supported.");
#endif
char tmp;

View File

@ -719,11 +719,6 @@ void os::print_context(outputStream *st, void *context) {
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Registers:");
// this is horrendously verbose but the layout of the registers in the
// context does not match how we defined our abstract Register set, so
// we can't just iterate through the gregs area
#ifdef AMD64
st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
@ -735,8 +730,8 @@ void os::print_context(outputStream *st, void *context) {
st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
st->cr();
st->print( "R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
st->print( "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
st->cr();
@ -747,63 +742,6 @@ void os::print_context(outputStream *st, void *context) {
st->cr();
st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
st->cr();
st->print_cr("RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
st->cr();
st->print_cr("RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
st->cr();
st->print_cr("RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
st->cr();
st->print_cr("RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
st->cr();
st->print_cr("RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
st->cr();
st->print_cr("RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
st->cr();
st->print_cr("RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
st->cr();
st->print_cr("R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
print_location(st, uc->uc_mcontext.gregs[REG_R8]);
st->cr();
st->print_cr("R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
print_location(st, uc->uc_mcontext.gregs[REG_R9]);
st->cr();
st->print_cr("R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
print_location(st, uc->uc_mcontext.gregs[REG_R10]);
st->cr();
st->print_cr("R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
print_location(st, uc->uc_mcontext.gregs[REG_R11]);
st->cr();
st->print_cr("R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
print_location(st, uc->uc_mcontext.gregs[REG_R12]);
st->cr();
st->print_cr("R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
print_location(st, uc->uc_mcontext.gregs[REG_R13]);
st->cr();
st->print_cr("R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
print_location(st, uc->uc_mcontext.gregs[REG_R14]);
st->cr();
st->print_cr("R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
@ -817,39 +755,6 @@ void os::print_context(outputStream *st, void *context) {
st->cr();
st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
print_location(st, uc->uc_mcontext.gregs[EAX]);
st->cr();
st->print_cr("EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
print_location(st, uc->uc_mcontext.gregs[EBX]);
st->cr();
st->print_cr("ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
print_location(st, uc->uc_mcontext.gregs[ECX]);
st->cr();
st->print_cr("EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
print_location(st, uc->uc_mcontext.gregs[EDX]);
st->cr();
st->print_cr("ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
print_location(st, uc->uc_mcontext.gregs[UESP]);
st->cr();
st->print_cr("EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
print_location(st, uc->uc_mcontext.gregs[EBP]);
st->cr();
st->print_cr("ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
print_location(st, uc->uc_mcontext.gregs[ESI]);
st->cr();
st->print_cr("EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
print_location(st, uc->uc_mcontext.gregs[EDI]);
#endif // AMD64
st->cr();
st->cr();
@ -865,7 +770,52 @@ void os::print_context(outputStream *st, void *context) {
ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
address pc = epc.pc();
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
ucontext_t *uc = (ucontext_t*)context;
st->print_cr("Register to memory mapping:");
st->cr();
// this is horrendously verbose but the layout of the registers in the
// context does not match how we defined our abstract Register set, so
// we can't just iterate through the gregs area
// this is only for the "general purpose" registers
#ifdef AMD64
st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[EAX]);
st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[EBX]);
st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[ECX]);
st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[EDX]);
st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[UESP]);
st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[EBP]);
st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[ESI]);
st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[EDI]);
#endif
st->cr();
}

View File

@ -387,8 +387,8 @@ void os::print_context(outputStream *st, void *context) {
st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
st->cr();
st->print( "R8=" INTPTR_FORMAT, uc->R8);
st->print(", R9=" INTPTR_FORMAT, uc->R9);
st->print( "R8 =" INTPTR_FORMAT, uc->R8);
st->print(", R9 =" INTPTR_FORMAT, uc->R9);
st->print(", R10=" INTPTR_FORMAT, uc->R10);
st->print(", R11=" INTPTR_FORMAT, uc->R11);
st->cr();
@ -399,62 +399,6 @@ void os::print_context(outputStream *st, void *context) {
st->cr();
st->print( "RIP=" INTPTR_FORMAT, uc->Rip);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("RAX=" INTPTR_FORMAT, uc->Rax);
print_location(st, uc->Rax);
st->cr();
st->print_cr("RBX=" INTPTR_FORMAT, uc->Rbx);
print_location(st, uc->Rbx);
st->cr();
st->print_cr("RCX=" INTPTR_FORMAT, uc->Rcx);
print_location(st, uc->Rcx);
st->cr();
st->print_cr("RDX=" INTPTR_FORMAT, uc->Rdx);
print_location(st, uc->Rdx);
st->cr();
st->print_cr("RSP=" INTPTR_FORMAT, uc->Rsp);
print_location(st, uc->Rsp);
st->cr();
st->print_cr("RBP=" INTPTR_FORMAT, uc->Rbp);
print_location(st, uc->Rbp);
st->cr();
st->print_cr("RSI=" INTPTR_FORMAT, uc->Rsi);
print_location(st, uc->Rsi);
st->cr();
st->print_cr("RDI=" INTPTR_FORMAT, uc->Rdi);
print_location(st, uc->Rdi);
st->cr();
st->print_cr("R8 =" INTPTR_FORMAT, uc->R8);
print_location(st, uc->R8);
st->cr();
st->print_cr("R9 =" INTPTR_FORMAT, uc->R9);
print_location(st, uc->R9);
st->cr();
st->print_cr("R10=" INTPTR_FORMAT, uc->R10);
print_location(st, uc->R10);
st->cr();
st->print_cr("R11=" INTPTR_FORMAT, uc->R11);
print_location(st, uc->R11);
st->cr();
st->print_cr("R12=" INTPTR_FORMAT, uc->R12);
print_location(st, uc->R12);
st->cr();
st->print_cr("R13=" INTPTR_FORMAT, uc->R13);
print_location(st, uc->R13);
st->cr();
st->print_cr("R14=" INTPTR_FORMAT, uc->R14);
print_location(st, uc->R14);
st->cr();
st->print_cr("R15=" INTPTR_FORMAT, uc->R15);
print_location(st, uc->R15);
#else
st->print( "EAX=" INTPTR_FORMAT, uc->Eax);
st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
@ -468,38 +412,6 @@ void os::print_context(outputStream *st, void *context) {
st->cr();
st->print( "EIP=" INTPTR_FORMAT, uc->Eip);
st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
st->cr();
st->cr();
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
st->print_cr("EAX=" INTPTR_FORMAT, uc->Eax);
print_location(st, uc->Eax);
st->cr();
st->print_cr("EBX=" INTPTR_FORMAT, uc->Ebx);
print_location(st, uc->Ebx);
st->cr();
st->print_cr("ECX=" INTPTR_FORMAT, uc->Ecx);
print_location(st, uc->Ecx);
st->cr();
st->print_cr("EDX=" INTPTR_FORMAT, uc->Edx);
print_location(st, uc->Edx);
st->cr();
st->print_cr("ESP=" INTPTR_FORMAT, uc->Esp);
print_location(st, uc->Esp);
st->cr();
st->print_cr("EBP=" INTPTR_FORMAT, uc->Ebp);
print_location(st, uc->Ebp);
st->cr();
st->print_cr("ESI=" INTPTR_FORMAT, uc->Esi);
print_location(st, uc->Esi);
st->cr();
st->print_cr("EDI=" INTPTR_FORMAT, uc->Edi);
print_location(st, uc->Edi);
#endif // AMD64
st->cr();
st->cr();
@ -514,7 +426,49 @@ void os::print_context(outputStream *st, void *context) {
// this at the end, and hope for the best.
address pc = (address)uc->REG_PC;
st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
st->cr();
}
void os::print_register_info(outputStream *st, void *context) {
if (context == NULL) return;
CONTEXT* uc = (CONTEXT*)context;
st->print_cr("Register to memory mapping:");
st->cr();
// this is only for the "general purpose" registers
#ifdef AMD64
st->print("RAX="); print_location(st, uc->Rax);
st->print("RBX="); print_location(st, uc->Rbx);
st->print("RCX="); print_location(st, uc->Rcx);
st->print("RDX="); print_location(st, uc->Rdx);
st->print("RSP="); print_location(st, uc->Rsp);
st->print("RBP="); print_location(st, uc->Rbp);
st->print("RSI="); print_location(st, uc->Rsi);
st->print("RDI="); print_location(st, uc->Rdi);
st->print("R8 ="); print_location(st, uc->R8);
st->print("R9 ="); print_location(st, uc->R9);
st->print("R10="); print_location(st, uc->R10);
st->print("R11="); print_location(st, uc->R11);
st->print("R12="); print_location(st, uc->R12);
st->print("R13="); print_location(st, uc->R13);
st->print("R14="); print_location(st, uc->R14);
st->print("R15="); print_location(st, uc->R15);
#else
st->print("EAX="); print_location(st, uc->Eax);
st->print("EBX="); print_location(st, uc->Ebx);
st->print("ECX="); print_location(st, uc->Ecx);
st->print("EDX="); print_location(st, uc->Edx);
st->print("ESP="); print_location(st, uc->Esp);
st->print("EBP="); print_location(st, uc->Ebp);
st->print("ESI="); print_location(st, uc->Esi);
st->print("EDI="); print_location(st, uc->Edi);
#endif
st->cr();
}

View File

@ -178,15 +178,11 @@ class Compilation: public StackObj {
return (int) NMethodSizeLimit; // default 256K or 512K
#else
// conditional branches on PPC are restricted to 16 bit signed
return MAX2((unsigned int)NMethodSizeLimit,32*K);
return MIN2((unsigned int)NMethodSizeLimit,32*K);
#endif
}
static int desired_max_constant_size() {
#ifndef PPC
return (int) NMethodSizeLimit / 10; // about 25K
#else
return (MAX2((unsigned int)NMethodSizeLimit, 32*K)) / 10;
#endif
return desired_max_code_buffer_size() / 10;
}
static void setup_code_buffer(CodeBuffer* cb, int call_stub_estimate);
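
Editor's note on the MAX2-to-MIN2 fix above: a 16-bit signed branch displacement reaches only +/-32K, so on PPC the code buffer must be capped at 32K; MAX2 would instead raise it to NMethodSizeLimit. A tiny standalone check of the arithmetic (assumes the 256K default named in the comment; illustrative, not HotSpot code):

#include <algorithm>
#include <cstdio>

int main() {
  const unsigned K = 1024;
  unsigned NMethodSizeLimit = 256 * K;                  // default from the comment above
  unsigned buggy = std::max(NMethodSizeLimit, 32 * K);  // old MAX2: 256K, beyond 16-bit branch reach
  unsigned fixed = std::min(NMethodSizeLimit, 32 * K);  // new MIN2: 32K, within +/-32768 bytes
  printf("MAX2 -> %uK, MIN2 -> %uK\n", buggy / K, fixed / K);
  return 0;
}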

View File

@ -321,7 +321,7 @@ class UseCountComputer: public ValueVisitor, BlockClosure {
void visit(Value* n) {
// Local instructions and Phis for expression stack values at the
// start of basic blocks are not added to the instruction list
if (!(*n)->is_linked()&& (*n)->can_be_linked()) {
if (!(*n)->is_linked() && (*n)->can_be_linked()) {
assert(false, "a node was not appended to the graph");
Compilation::current()->bailout("a node was not appended to the graph");
}

View File

@ -415,28 +415,26 @@ bool Constant::is_equal(Value v) const {
return false;
}
BlockBegin* Constant::compare(Instruction::Condition cond, Value right,
BlockBegin* true_sux, BlockBegin* false_sux) {
Constant::CompareResult Constant::compare(Instruction::Condition cond, Value right) const {
Constant* rc = right->as_Constant();
// other is not a constant
if (rc == NULL) return NULL;
if (rc == NULL) return not_comparable;
ValueType* lt = type();
ValueType* rt = rc->type();
// different types
if (lt->base() != rt->base()) return NULL;
if (lt->base() != rt->base()) return not_comparable;
switch (lt->tag()) {
case intTag: {
int x = lt->as_IntConstant()->value();
int y = rt->as_IntConstant()->value();
switch (cond) {
case If::eql: return x == y ? true_sux : false_sux;
case If::neq: return x != y ? true_sux : false_sux;
case If::lss: return x < y ? true_sux : false_sux;
case If::leq: return x <= y ? true_sux : false_sux;
case If::gtr: return x > y ? true_sux : false_sux;
case If::geq: return x >= y ? true_sux : false_sux;
case If::eql: return x == y ? cond_true : cond_false;
case If::neq: return x != y ? cond_true : cond_false;
case If::lss: return x < y ? cond_true : cond_false;
case If::leq: return x <= y ? cond_true : cond_false;
case If::gtr: return x > y ? cond_true : cond_false;
case If::geq: return x >= y ? cond_true : cond_false;
}
break;
}
@ -444,12 +442,12 @@ BlockBegin* Constant::compare(Instruction::Condition cond, Value right,
jlong x = lt->as_LongConstant()->value();
jlong y = rt->as_LongConstant()->value();
switch (cond) {
case If::eql: return x == y ? true_sux : false_sux;
case If::neq: return x != y ? true_sux : false_sux;
case If::lss: return x < y ? true_sux : false_sux;
case If::leq: return x <= y ? true_sux : false_sux;
case If::gtr: return x > y ? true_sux : false_sux;
case If::geq: return x >= y ? true_sux : false_sux;
case If::eql: return x == y ? cond_true : cond_false;
case If::neq: return x != y ? cond_true : cond_false;
case If::lss: return x < y ? cond_true : cond_false;
case If::leq: return x <= y ? cond_true : cond_false;
case If::gtr: return x > y ? cond_true : cond_false;
case If::geq: return x >= y ? cond_true : cond_false;
}
break;
}
@ -459,14 +457,14 @@ BlockBegin* Constant::compare(Instruction::Condition cond, Value right,
assert(xvalue != NULL && yvalue != NULL, "not constants");
if (xvalue->is_loaded() && yvalue->is_loaded()) {
switch (cond) {
case If::eql: return xvalue == yvalue ? true_sux : false_sux;
case If::neq: return xvalue != yvalue ? true_sux : false_sux;
case If::eql: return xvalue == yvalue ? cond_true : cond_false;
case If::neq: return xvalue != yvalue ? cond_true : cond_false;
}
}
break;
}
}
return NULL;
return not_comparable;
}

View File

@ -443,7 +443,7 @@ class Instruction: public CompilationResourceObj {
// generic
virtual Instruction* as_Instruction() { return this; } // to satisfy HASHING1 macro
virtual Phi* as_Phi() { return NULL; }
virtual Phi* as_Phi() { return NULL; }
virtual Local* as_Local() { return NULL; }
virtual Constant* as_Constant() { return NULL; }
virtual AccessField* as_AccessField() { return NULL; }
@ -650,8 +650,24 @@ LEAF(Constant, Instruction)
virtual intx hash() const;
virtual bool is_equal(Value v) const;
virtual BlockBegin* compare(Instruction::Condition condition, Value right,
BlockBegin* true_sux, BlockBegin* false_sux);
enum CompareResult { not_comparable = -1, cond_false, cond_true };
virtual CompareResult compare(Instruction::Condition condition, Value right) const;
BlockBegin* compare(Instruction::Condition cond, Value right,
BlockBegin* true_sux, BlockBegin* false_sux) const {
switch (compare(cond, right)) {
case not_comparable:
return NULL;
case cond_false:
return false_sux;
case cond_true:
return true_sux;
default:
ShouldNotReachHere();
return NULL;
}
}
};

View File

@ -1350,7 +1350,6 @@ void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_Opr
addr = ptr;
}
assert(addr->is_register(), "must be a register at this point");
assert(addr->type() == T_OBJECT, "addr should point to an object");
LIR_Opr xor_res = new_pointer_register();
LIR_Opr xor_shift_res = new_pointer_register();

View File

@ -38,18 +38,20 @@ class CE_Eliminator: public BlockClosure {
private:
IR* _hir;
int _cee_count; // the number of CEs successfully eliminated
int _ifop_count; // the number of IfOps successfully simplified
int _has_substitution;
public:
CE_Eliminator(IR* hir) : _cee_count(0), _hir(hir) {
CE_Eliminator(IR* hir) : _cee_count(0), _ifop_count(0), _hir(hir) {
_has_substitution = false;
_hir->iterate_preorder(this);
if (_has_substitution) {
// substituted some phis so resolve the substitution
// substituted some ifops/phis, so resolve the substitution
SubstitutionResolver sr(_hir);
}
}
int cee_count() const { return _cee_count; }
int ifop_count() const { return _ifop_count; }
void adjust_exception_edges(BlockBegin* block, BlockBegin* sux) {
int e = sux->number_of_exception_handlers();
@ -68,156 +70,214 @@ class CE_Eliminator: public BlockClosure {
}
}
virtual void block_do(BlockBegin* block) {
// 1) find conditional expression
// check if block ends with an If
If* if_ = block->end()->as_If();
if (if_ == NULL) return;
virtual void block_do(BlockBegin* block);
// check if If works on int or object types
// (we cannot handle If's working on longs, floats or doubles yet,
// since IfOp doesn't support them - these If's show up if cmp
// operations followed by If's are eliminated)
ValueType* if_type = if_->x()->type();
if (!if_type->is_int() && !if_type->is_object()) return;
BlockBegin* t_block = if_->tsux();
BlockBegin* f_block = if_->fsux();
Instruction* t_cur = t_block->next();
Instruction* f_cur = f_block->next();
// one Constant may be present between BlockBegin and BlockEnd
Value t_const = NULL;
Value f_const = NULL;
if (t_cur->as_Constant() != NULL && !t_cur->can_trap()) {
t_const = t_cur;
t_cur = t_cur->next();
}
if (f_cur->as_Constant() != NULL && !f_cur->can_trap()) {
f_const = f_cur;
f_cur = f_cur->next();
}
// check if both branches end with a goto
Goto* t_goto = t_cur->as_Goto();
if (t_goto == NULL) return;
Goto* f_goto = f_cur->as_Goto();
if (f_goto == NULL) return;
// check if both gotos merge into the same block
BlockBegin* sux = t_goto->default_sux();
if (sux != f_goto->default_sux()) return;
// check if at least one word was pushed on sux_state
ValueStack* sux_state = sux->state();
if (sux_state->stack_size() <= if_->state()->stack_size()) return;
// check if phi function is present at end of successor stack and that
// only this phi was pushed on the stack
Value sux_phi = sux_state->stack_at(if_->state()->stack_size());
if (sux_phi == NULL || sux_phi->as_Phi() == NULL || sux_phi->as_Phi()->block() != sux) return;
if (sux_phi->type()->size() != sux_state->stack_size() - if_->state()->stack_size()) return;
// get the values that were pushed in the true- and false-branch
Value t_value = t_goto->state()->stack_at(if_->state()->stack_size());
Value f_value = f_goto->state()->stack_at(if_->state()->stack_size());
// backend does not support floats
assert(t_value->type()->base() == f_value->type()->base(), "incompatible types");
if (t_value->type()->is_float_kind()) return;
// check that successor has no other phi functions but sux_phi
// this can happen when t_block or f_block contained additional stores to local variables
// that are no longer represented by explicit instructions
for_each_phi_fun(sux, phi,
if (phi != sux_phi) return;
);
// true and false blocks can't have phis
for_each_phi_fun(t_block, phi, return; );
for_each_phi_fun(f_block, phi, return; );
// 2) substitute conditional expression
// with an IfOp followed by a Goto
// cut if_ away and get node before
Instruction* cur_end = if_->prev(block);
// append constants of true- and false-block if necessary
// clone constants because original block must not be destroyed
assert((t_value != f_const && f_value != t_const) || t_const == f_const, "mismatch");
if (t_value == t_const) {
t_value = new Constant(t_const->type());
NOT_PRODUCT(t_value->set_printable_bci(if_->printable_bci()));
cur_end = cur_end->set_next(t_value);
}
if (f_value == f_const) {
f_value = new Constant(f_const->type());
NOT_PRODUCT(f_value->set_printable_bci(if_->printable_bci()));
cur_end = cur_end->set_next(f_value);
}
// it is very unlikely that the condition can be statically decided
// (this was checked previously by the Canonicalizer), so always
// append IfOp
Value result = new IfOp(if_->x(), if_->cond(), if_->y(), t_value, f_value);
NOT_PRODUCT(result->set_printable_bci(if_->printable_bci()));
cur_end = cur_end->set_next(result);
// append Goto to successor
ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL;
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
ValueStack* goto_state = if_->state();
while (sux_state->scope() != goto_state->scope()) {
goto_state = goto_state->caller_state();
assert(goto_state != NULL, "states do not match up");
}
goto_state = goto_state->copy(ValueStack::StateAfter, goto_state->bci());
goto_state->push(result->type(), result);
assert(goto_state->is_same(sux_state), "states must match now");
goto_->set_state(goto_state);
cur_end = cur_end->set_next(goto_, goto_state->bci());
// Adjust control flow graph
BlockBegin::disconnect_edge(block, t_block);
BlockBegin::disconnect_edge(block, f_block);
if (t_block->number_of_preds() == 0) {
BlockBegin::disconnect_edge(t_block, sux);
}
adjust_exception_edges(block, t_block);
if (f_block->number_of_preds() == 0) {
BlockBegin::disconnect_edge(f_block, sux);
}
adjust_exception_edges(block, f_block);
// update block end
block->set_end(goto_);
// substitute the phi if possible
if (sux_phi->as_Phi()->operand_count() == 1) {
assert(sux_phi->as_Phi()->operand_at(0) == result, "screwed up phi");
sux_phi->set_subst(result);
_has_substitution = true;
}
// 3) successfully eliminated a conditional expression
_cee_count++;
if (PrintCEE) {
tty->print_cr("%d. CEE in B%d (B%d B%d)", cee_count(), block->block_id(), t_block->block_id(), f_block->block_id());
}
_hir->verify();
}
private:
Value make_ifop(Value x, Instruction::Condition cond, Value y, Value tval, Value fval);
};
void CE_Eliminator::block_do(BlockBegin* block) {
// 1) find conditional expression
// check if block ends with an If
If* if_ = block->end()->as_If();
if (if_ == NULL) return;
// check if If works on int or object types
// (we cannot handle If's working on longs, floats or doubles yet,
// since IfOp doesn't support them - these If's show up if cmp
// operations followed by If's are eliminated)
ValueType* if_type = if_->x()->type();
if (!if_type->is_int() && !if_type->is_object()) return;
BlockBegin* t_block = if_->tsux();
BlockBegin* f_block = if_->fsux();
Instruction* t_cur = t_block->next();
Instruction* f_cur = f_block->next();
// one Constant may be present between BlockBegin and BlockEnd
Value t_const = NULL;
Value f_const = NULL;
if (t_cur->as_Constant() != NULL && !t_cur->can_trap()) {
t_const = t_cur;
t_cur = t_cur->next();
}
if (f_cur->as_Constant() != NULL && !f_cur->can_trap()) {
f_const = f_cur;
f_cur = f_cur->next();
}
// check if both branches end with a goto
Goto* t_goto = t_cur->as_Goto();
if (t_goto == NULL) return;
Goto* f_goto = f_cur->as_Goto();
if (f_goto == NULL) return;
// check if both gotos merge into the same block
BlockBegin* sux = t_goto->default_sux();
if (sux != f_goto->default_sux()) return;
// check if at least one word was pushed on sux_state
ValueStack* sux_state = sux->state();
if (sux_state->stack_size() <= if_->state()->stack_size()) return;
// check if phi function is present at end of successor stack and that
// only this phi was pushed on the stack
Value sux_phi = sux_state->stack_at(if_->state()->stack_size());
if (sux_phi == NULL || sux_phi->as_Phi() == NULL || sux_phi->as_Phi()->block() != sux) return;
if (sux_phi->type()->size() != sux_state->stack_size() - if_->state()->stack_size()) return;
// get the values that were pushed in the true- and false-branch
Value t_value = t_goto->state()->stack_at(if_->state()->stack_size());
Value f_value = f_goto->state()->stack_at(if_->state()->stack_size());
// backend does not support floats
assert(t_value->type()->base() == f_value->type()->base(), "incompatible types");
if (t_value->type()->is_float_kind()) return;
// check that successor has no other phi functions but sux_phi
// this can happen when t_block or f_block contained additional stores to local variables
// that are no longer represented by explicit instructions
for_each_phi_fun(sux, phi,
if (phi != sux_phi) return;
);
// true and false blocks can't have phis
for_each_phi_fun(t_block, phi, return; );
for_each_phi_fun(f_block, phi, return; );
// 2) substitute conditional expression
// with an IfOp followed by a Goto
// cut if_ away and get node before
Instruction* cur_end = if_->prev(block);
// append constants of true- and false-block if necessary
// clone constants because original block must not be destroyed
assert((t_value != f_const && f_value != t_const) || t_const == f_const, "mismatch");
if (t_value == t_const) {
t_value = new Constant(t_const->type());
NOT_PRODUCT(t_value->set_printable_bci(if_->printable_bci()));
cur_end = cur_end->set_next(t_value);
}
if (f_value == f_const) {
f_value = new Constant(f_const->type());
NOT_PRODUCT(f_value->set_printable_bci(if_->printable_bci()));
cur_end = cur_end->set_next(f_value);
}
Value result = make_ifop(if_->x(), if_->cond(), if_->y(), t_value, f_value);
assert(result != NULL, "make_ifop must return a non-null instruction");
if (!result->is_linked() && result->can_be_linked()) {
NOT_PRODUCT(result->set_printable_bci(if_->printable_bci()));
cur_end = cur_end->set_next(result);
}
// append Goto to successor
ValueStack* state_before = if_->is_safepoint() ? if_->state_before() : NULL;
Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
// prepare state for Goto
ValueStack* goto_state = if_->state();
while (sux_state->scope() != goto_state->scope()) {
goto_state = goto_state->caller_state();
assert(goto_state != NULL, "states do not match up");
}
goto_state = goto_state->copy(ValueStack::StateAfter, goto_state->bci());
goto_state->push(result->type(), result);
assert(goto_state->is_same(sux_state), "states must match now");
goto_->set_state(goto_state);
cur_end = cur_end->set_next(goto_, goto_state->bci());
// Adjust control flow graph
BlockBegin::disconnect_edge(block, t_block);
BlockBegin::disconnect_edge(block, f_block);
if (t_block->number_of_preds() == 0) {
BlockBegin::disconnect_edge(t_block, sux);
}
adjust_exception_edges(block, t_block);
if (f_block->number_of_preds() == 0) {
BlockBegin::disconnect_edge(f_block, sux);
}
adjust_exception_edges(block, f_block);
// update block end
block->set_end(goto_);
// substitute the phi if possible
if (sux_phi->as_Phi()->operand_count() == 1) {
assert(sux_phi->as_Phi()->operand_at(0) == result, "screwed up phi");
sux_phi->set_subst(result);
_has_substitution = true;
}
// 3) successfully eliminated a conditional expression
_cee_count++;
if (PrintCEE) {
tty->print_cr("%d. CEE in B%d (B%d B%d)", cee_count(), block->block_id(), t_block->block_id(), f_block->block_id());
tty->print_cr("%d. IfOp in B%d", ifop_count(), block->block_id());
}
_hir->verify();
}
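
For intuition, the CFG shape block_do matches corresponds to a source-level conditional expression. A plain C++ model of the before/after (illustrative; the real transformation operates on C1's HIR graph, not on source):

#include <cassert>

int cee_before(int a, int b, int t, int f) {
  int result;
  if (a < b) { result = t; } else { result = f; }  // If, two Gotos, Phi at the merge block
  return result;
}

int cee_after(int a, int b, int t, int f) {
  return (a < b) ? t : f;                          // single IfOp followed by a Goto, no extra blocks
}

int main() {
  assert(cee_before(1, 2, 10, 20) == cee_after(1, 2, 10, 20));
  return 0;
}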
Value CE_Eliminator::make_ifop(Value x, Instruction::Condition cond, Value y, Value tval, Value fval) {
if (!OptimizeIfOps) {
return new IfOp(x, cond, y, tval, fval);
}
tval = tval->subst();
fval = fval->subst();
if (tval == fval) {
_ifop_count++;
return tval;
}
x = x->subst();
y = y->subst();
Constant* y_const = y->as_Constant();
if (y_const != NULL) {
IfOp* x_ifop = x->as_IfOp();
if (x_ifop != NULL) { // x is an ifop, y is a constant
Constant* x_tval_const = x_ifop->tval()->subst()->as_Constant();
Constant* x_fval_const = x_ifop->fval()->subst()->as_Constant();
if (x_tval_const != NULL && x_fval_const != NULL) {
Instruction::Condition x_ifop_cond = x_ifop->cond();
Constant::CompareResult t_compare_res = x_tval_const->compare(cond, y_const);
Constant::CompareResult f_compare_res = x_fval_const->compare(cond, y_const);
guarantee(t_compare_res != Constant::not_comparable && f_compare_res != Constant::not_comparable, "incomparable constants in IfOp");
Value new_tval = t_compare_res == Constant::cond_true ? tval : fval;
Value new_fval = f_compare_res == Constant::cond_true ? tval : fval;
_ifop_count++;
if (new_tval == new_fval) {
return new_tval;
} else {
return new IfOp(x_ifop->x(), x_ifop_cond, x_ifop->y(), new_tval, new_fval);
}
}
} else {
Constant* x_const = x->as_Constant();
if (x_const != NULL) { // x and y are constants
Constant::CompareResult x_compare_res = x_const->compare(cond, y_const);
guarantee(x_compare_res != Constant::not_comparable, "incomparable constants in IfOp");
_ifop_count++;
return x_compare_res == Constant::cond_true ? tval : fval;
}
}
}
return new IfOp(x, cond, y, tval, fval);
}
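
A worked instance of the folding make_ifop performs when x is itself an IfOp with constant branches and y is a constant: comparing each constant branch against y decides that branch statically, so the nested IfOp collapses into one. In plain C++ (illustrative model, not HIR):

#include <cassert>

int main() {
  int a = 3, b = 7, t = 100, f = 200;
  int x      = (a < b) ? 0 : 1;   // inner IfOp with constant branches 0 and 1
  int before = (x == 0) ? t : f;  // outer IfOp tests x against the constant 0
  int after  = (a < b) ? t : f;   // folded: 0==0 picks t, 1==0 picks f
  assert(before == after);        // both yield 100 here
  return 0;
}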
void Optimizer::eliminate_conditional_expressions() {
// find conditional expressions & replace them with IfOps
CE_Eliminator ce(ir());
}
class BlockMerger: public BlockClosure {
private:
IR* _hir;

View File

@ -107,7 +107,6 @@ static void deopt_caller() {
RegisterMap reg_map(thread, false);
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(&reg_map);
// bypass VM_DeoptimizeFrame and deoptimize the frame directly
Deoptimization::deoptimize_frame(thread, caller_frame.id());
assert(caller_is_deopted(), "Must be deoptimized");
}
@ -368,8 +367,7 @@ JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci,
if (osr_nm != NULL) {
RegisterMap map(thread, false);
frame fr = thread->last_frame().sender(&map);
VM_DeoptimizeFrame deopt(thread, fr.id());
VMThread::execute(&deopt);
Deoptimization::deoptimize_frame(thread, fr.id());
}
JRT_BLOCK_END
return NULL;
@ -441,8 +439,8 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// We don't really want to deoptimize the nmethod itself since we
// can actually continue in the exception handler ourselves but I
// don't see an easy way to have the desired effect.
VM_DeoptimizeFrame deopt(thread, caller_frame.id());
VMThread::execute(&deopt);
Deoptimization::deoptimize_frame(thread, caller_frame.id());
assert(caller_is_deopted(), "Must be deoptimized");
return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}
@ -835,8 +833,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_i
nm->make_not_entrant();
}
VM_DeoptimizeFrame deopt(thread, caller_frame.id());
VMThread::execute(&deopt);
Deoptimization::deoptimize_frame(thread, caller_frame.id());
// Return to the now deoptimized frame.
}

View File

@ -75,6 +75,9 @@
develop(bool, SelectivePhiFunctions, true, \
"create phi functions at loop headers only when necessary") \
\
develop(bool, OptimizeIfOps, true, \
"Optimize multiple IfOps") \
\
develop(bool, DoCEE, true, \
"Do Conditional Expression Elimination to simplify CFG") \
\
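
Note that both flags above are develop() flags, so they are settable only in debug (non-product) builds; there, -XX:-OptimizeIfOps disables the folding and -XX:+PrintCEE reports the counters. A sample invocation on a debug VM (class name hypothetical):

java -XX:+PrintCEE -XX:-OptimizeIfOps MyBenchmark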

View File

@ -564,7 +564,7 @@ bool ciInstanceKlass::is_leaf_type() {
// This is OK, since any dependencies we decide to assert
// will be checked later under the Compile_lock.
ciInstanceKlass* ciInstanceKlass::implementor(int n) {
if (n > implementors_limit) {
if (n >= implementors_limit) {
return NULL;
}
ciInstanceKlass* impl = _implementors[n];
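
The guard change above is an off-by-one fix: for n == implementors_limit to be rejected, the valid slots must be 0 .. implementors_limit-1, so the old > let one out-of-range index through. A minimal model of the corrected bound (array length and limit value are assumptions of the model, not quoted from the ci code):

const int implementors_limit = 2;          // assumption for the model

int* lookup(int* impls, int n) {
  if (n >= implementors_limit) return 0;   // old '>' admitted n == implementors_limit
  return &impls[n];                        // safe: n is 0 .. implementors_limit-1
}

int main() {
  int impls[implementors_limit] = { 0, 0 };
  return lookup(impls, implementors_limit) == 0 ? 0 : 1;  // rejected as out of range
}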

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,6 +73,12 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
unsigned int hashValues[SymbolTable::symbol_alloc_batch_size];
int names_count = 0;
// Side buffer for operands of variable-sized (InvokeDynamic) entries.
GrowableArray<int>* operands = NULL;
#ifdef ASSERT
GrowableArray<int>* indy_instructions = new GrowableArray<int>(THREAD, 10);
#endif
// parsing: index 0 is unused
for (int index = 1; index < length; index++) {
// Each of the following case guarantees one more byte in the stream
@ -141,6 +147,7 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
ShouldNotReachHere();
}
break;
case JVM_CONSTANT_InvokeDynamicTrans : // this tag appears only in old classfiles
case JVM_CONSTANT_InvokeDynamic :
{
if (!EnableInvokeDynamic ||
@ -151,10 +158,36 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
"Class file version does not support constant tag %u in class file %s"),
tag, CHECK);
}
cfs->guarantee_more(5, CHECK); // bsm_index, name_and_type_index, tag/access_flags
if (!AllowTransitionalJSR292 && tag == JVM_CONSTANT_InvokeDynamicTrans) {
classfile_parse_error(
"This JVM does not support transitional InvokeDynamic tag %u in class file %s",
tag, CHECK);
}
bool trans_no_argc = AllowTransitionalJSR292 && (tag == JVM_CONSTANT_InvokeDynamicTrans);
cfs->guarantee_more(7, CHECK); // bsm_index, nt, argc, ..., tag/access_flags
u2 bootstrap_method_index = cfs->get_u2_fast();
u2 name_and_type_index = cfs->get_u2_fast();
cp->invoke_dynamic_at_put(index, bootstrap_method_index, name_and_type_index);
int argument_count = trans_no_argc ? 0 : cfs->get_u2_fast();
cfs->guarantee_more(2*argument_count + 1, CHECK); // argv[argc]..., tag/access_flags
int argv_offset = constantPoolOopDesc::_indy_argv_offset;
int op_count = argv_offset + argument_count; // bsm, nt, argc, argv[]...
int op_base = start_operand_group(operands, op_count, CHECK);
assert(argv_offset == 3, "else adjust next 3 assignments");
operands->at_put(op_base + constantPoolOopDesc::_indy_bsm_offset, bootstrap_method_index);
operands->at_put(op_base + constantPoolOopDesc::_indy_nt_offset, name_and_type_index);
operands->at_put(op_base + constantPoolOopDesc::_indy_argc_offset, argument_count);
for (int arg_i = 0; arg_i < argument_count; arg_i++) {
int arg = cfs->get_u2_fast();
operands->at_put(op_base + constantPoolOopDesc::_indy_argv_offset + arg_i, arg);
}
cp->invoke_dynamic_at_put(index, op_base, op_count);
#ifdef ASSERT
// Record the steps just taken for later checking.
indy_instructions->append(index);
indy_instructions->append(bootstrap_method_index);
indy_instructions->append(name_and_type_index);
indy_instructions->append(argument_count);
#endif //ASSERT
}
break;
case JVM_CONSTANT_Integer :
@ -257,6 +290,23 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK);
}
if (operands != NULL && operands->length() > 0) {
store_operand_array(operands, cp, CHECK);
}
#ifdef ASSERT
// Re-assert the indy structures, now that assertion checking can work.
for (int indy_i = 0; indy_i < indy_instructions->length(); ) {
int index = indy_instructions->at(indy_i++);
int bootstrap_method_index = indy_instructions->at(indy_i++);
int name_and_type_index = indy_instructions->at(indy_i++);
int argument_count = indy_instructions->at(indy_i++);
assert(cp->check_invoke_dynamic_at(index,
bootstrap_method_index, name_and_type_index,
argument_count),
"indy structure is OK");
}
#endif //ASSERT
// Copy _current pointer of local copy back to stream().
#ifdef ASSERT
assert(cfs0->current() == old_current, "non-exclusive use of stream()");
@ -264,6 +314,41 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
cfs0->set_current(cfs1.current());
}
int ClassFileParser::start_operand_group(GrowableArray<int>* &operands, int op_count, TRAPS) {
if (operands == NULL) {
operands = new GrowableArray<int>(THREAD, 100);
int fillp_offset = constantPoolOopDesc::_multi_operand_buffer_fill_pointer_offset;
while (operands->length() <= fillp_offset)
operands->append(0); // force op_base > 0, for an error check
DEBUG_ONLY(operands->at_put(fillp_offset, (int)badHeapWordVal));
}
int cnt_pos = operands->append(op_count);
int arg_pos = operands->length();
operands->at_grow(arg_pos + op_count - 1); // grow to include the operands
assert(operands->length() == arg_pos + op_count, "");
int op_base = cnt_pos - constantPoolOopDesc::_multi_operand_count_offset;
return op_base;
}
void ClassFileParser::store_operand_array(GrowableArray<int>* operands, constantPoolHandle cp, TRAPS) {
// Collect the buffer of operands from variable-sized entries into a permanent array.
int arraylen = operands->length();
int fillp_offset = constantPoolOopDesc::_multi_operand_buffer_fill_pointer_offset;
assert(operands->at(fillp_offset) == (int)badHeapWordVal, "value unused so far");
operands->at_put(fillp_offset, arraylen);
cp->multi_operand_buffer_grow(arraylen, CHECK);
typeArrayOop operands_oop = cp->operands();
assert(operands_oop->length() == arraylen, "");
for (int i = 0; i < arraylen; i++) {
operands_oop->int_at_put(i, operands->at(i));
}
cp->set_operands(operands_oop);
// The fill_pointer is used only by constantPoolOop::copy_entry_to and friends,
// when constant pools need to be merged. Make sure it is sane now.
assert(cp->multi_operand_buffer_fill_pointer() == arraylen, "");
}
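The operand array built by these two helpers packs every InvokeDynamic entry as [bsm, nt, argc, argv...], addressed through the _indy_*_offset constants. Below is a minimal standalone sketch of that layout, assuming the offsets 0, 1, 2, 3 implied by the "argv_offset == 3" assert above; the constant-pool indices are made up:

    #include <cassert>
    #include <vector>

    int main() {
      const int BSM = 0, NT = 1, ARGC = 2, ARGV = 3;  // assumed _indy_*_offset values

      // One operand group for one InvokeDynamic entry:
      // bsm_index=17, name_and_type_index=18, two static args at cp indices 5 and 9.
      std::vector<int> operands;
      int op_base = (int)operands.size();
      operands.push_back(17);  // bsm
      operands.push_back(18);  // nt
      operands.push_back(2);   // argc
      operands.push_back(5);   // argv[0]
      operands.push_back(9);   // argv[1]

      assert(operands[op_base + BSM]  == 17);
      assert(operands[op_base + NT]   == 18);
      assert(operands[op_base + ARGC] == 2);
      assert(operands[op_base + ARGV + 1] == 9);  // second static argument
      return 0;
    }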
bool inline valid_cp_range(int index, int length) { return (index > 0 && index < length); }
constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
@ -431,6 +516,8 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
ref_index, CHECK_(nullHandle));
}
break;
case JVM_CONSTANT_InvokeDynamicTrans :
ShouldNotReachHere(); // this tag does not appear in the heap
case JVM_CONSTANT_InvokeDynamic :
{
int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
@ -438,7 +525,7 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
check_property((bootstrap_method_ref_index == 0 && AllowTransitionalJSR292)
||
(valid_cp_range(bootstrap_method_ref_index, length) &&
(cp->tag_at(bootstrap_method_ref_index).is_method_handle())),
"Invalid constant pool index %u in class file %s",
bootstrap_method_ref_index,
CHECK_(nullHandle));
@ -447,6 +534,18 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
"Invalid constant pool index %u in class file %s",
name_and_type_ref_index,
CHECK_(nullHandle));
int argc = cp->invoke_dynamic_argument_count_at(index);
for (int arg_i = 0; arg_i < argc; arg_i++) {
int arg = cp->invoke_dynamic_argument_index_at(index, arg_i);
        check_property(valid_cp_range(arg, length) &&
                         (cp->tag_at(arg).is_loadable_constant() ||
                          // temporary early forms of string and class:
                          cp->tag_at(arg).is_klass_index() ||
                          cp->tag_at(arg).is_string_index()),
"Invalid constant pool index %u in class file %s",
arg,
CHECK_(nullHandle));
}
break;
}
default:
@ -2505,18 +2604,6 @@ void ClassFileParser::java_lang_ref_Reference_fix_pre(typeArrayHandle* fields_pt
  // the check for the "discovered" field should issue a warning if
  // the field is not found. For 1.6 this code should issue a
  // fatal error if the "discovered" field is not found.
  //
  // Increment fac.nonstatic_oop_count so that the start of the
  // next type of non-static oops leaves room for the fake oop.
@ -2663,7 +2750,7 @@ void ClassFileParser::java_lang_Class_fix_post(int* next_nonstatic_oop_offset_pt
// Force MethodHandle.vmentry to be an unmanaged pointer.
// There is no way for a classfile to express this, so we must help it.
void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
typeArrayHandle fields,
FieldAllocationCount *fac_ptr,
TRAPS) {
// Add fake fields for java.dyn.MethodHandle instances
@ -2687,41 +2774,45 @@ void ClassFileParser::java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
"missing I or J signature (for vmentry) in java.dyn.MethodHandle");
// Find vmentry field and change the signature.
bool found_vmentry = false;
  for (int i = 0; i < fields->length(); i += instanceKlass::next_offset) {
    int name_index = fields->ushort_at(i + instanceKlass::name_index_offset);
    int sig_index  = fields->ushort_at(i + instanceKlass::signature_index_offset);
    int acc_flags  = fields->ushort_at(i + instanceKlass::access_flags_offset);
    symbolOop f_name = cp->symbol_at(name_index);
    symbolOop f_sig  = cp->symbol_at(sig_index);
    if (f_name == vmSymbols::vmentry_name() && (acc_flags & JVM_ACC_STATIC) == 0) {
      if (f_sig == vmSymbols::machine_word_signature()) {
        // If the signature of vmentry is already changed, we're done.
        found_vmentry = true;
        break;
      }
      else if (f_sig == vmSymbols::byte_signature()) {
        // Adjust the field type from byte to an unmanaged pointer.
        assert(fac_ptr->nonstatic_byte_count > 0, "");
        fac_ptr->nonstatic_byte_count -= 1;
        fields->ushort_at_put(i + instanceKlass::signature_index_offset, word_sig_index);
        assert(wordSize == longSize || wordSize == jintSize, "ILP32 or LP64");
        if (wordSize == longSize)  fac_ptr->nonstatic_double_count += 1;
        else                       fac_ptr->nonstatic_word_count   += 1;
        FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
        assert(atype == NONSTATIC_BYTE, "");
        FieldAllocationType new_atype = (wordSize == longSize) ? NONSTATIC_DOUBLE : NONSTATIC_WORD;
        fields->ushort_at_put(i + instanceKlass::low_offset, new_atype);
        found_vmentry = true;
        break;
      }
    }
  }
if (!found_vmentry)
THROW_MSG(vmSymbols::java_lang_VirtualMachineError(),
"missing vmentry byte field in java.dyn.MethodHandle");
}
@ -3082,7 +3173,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
// adjust the vmentry field declaration in java.dyn.MethodHandle
if (EnableMethodHandles && class_name() == vmSymbols::sun_dyn_MethodHandleImpl() && class_loader.is_null()) {
java_dyn_MethodHandle_fix_pre(cp, fields, &fac, CHECK_(nullHandle));
}
// Add a fake "discovered" field if it is not present
@ -4309,20 +4400,21 @@ int ClassFileParser::verify_legal_method_signature(symbolHandle name, symbolHand
}
// Unqualified names may not contain the characters '.', ';', '[', or '/'.
// Method names also may not contain the characters '<' or '>', unless <init>
// or <clinit>. Note that method names may not be <init> or <clinit> in this
// method. Because these names have been checked as special cases before
// calling this method in verify_legal_method_name.
bool ClassFileParser::verify_unqualified_name(
char* name, unsigned int length, int type) {
jchar ch;
for (char* p = name; p != name + length; ) {
ch = *p;
if (ch < 128) {
p++;
if (ch == '.' || ch == ';' || ch == '[' ) {
return false; // do not permit '.', ';', or '['
}
if (type != LegalClass && ch == '/') {
return false; // do not permit '/' unless it's class name

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,6 +56,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
constantPoolHandle parse_constant_pool(TRAPS);
static int start_operand_group(GrowableArray<int>* &operands, int op_count, TRAPS);
static void store_operand_array(GrowableArray<int>* operands, constantPoolHandle cp, TRAPS);
// Interface parsing
objArrayHandle parse_interfaces(constantPoolHandle cp,
int length,
@ -151,7 +154,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
// Adjust the field allocation counts for java.dyn.MethodHandle to add
// a fake address (void*) field.
void java_dyn_MethodHandle_fix_pre(constantPoolHandle cp,
typeArrayHandle fields,
FieldAllocationCount *fac_ptr, TRAPS);
// Format checker methods

View File

@ -0,0 +1,916 @@
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
// These classes represent the stack-map substructures described in the JVMS
// (hence the non-conforming naming scheme).
// These classes work with the types in their compressed form in-place (as they
// would appear in the classfile). No virtual methods or fields allowed.
class verification_type_info {
private:
// u1 tag
  // u2 cpool_index || u2 bci (for ITEM_Object & ITEM_Uninitialized only)
address tag_addr() const { return (address)this; }
address cpool_index_addr() const { return tag_addr() + sizeof(u1); }
address bci_addr() const { return cpool_index_addr(); }
protected:
// No constructors - should be 'private', but GCC issues a warning if it is
verification_type_info() {}
verification_type_info(const verification_type_info&) {}
public:
static verification_type_info* at(address addr) {
return (verification_type_info*)addr;
}
static verification_type_info* create_at(address addr, u1 tag) {
verification_type_info* vti = (verification_type_info*)addr;
vti->set_tag(tag);
return vti;
}
static verification_type_info* create_object_at(address addr, u2 cp_idx) {
verification_type_info* vti = (verification_type_info*)addr;
vti->set_tag(ITEM_Object);
vti->set_cpool_index(cp_idx);
return vti;
}
static verification_type_info* create_uninit_at(address addr, u2 bci) {
verification_type_info* vti = (verification_type_info*)addr;
vti->set_tag(ITEM_Uninitialized);
vti->set_bci(bci);
return vti;
}
static size_t calculate_size(u1 tag) {
if (tag == ITEM_Object || tag == ITEM_Uninitialized) {
return sizeof(u1) + sizeof(u2);
} else {
return sizeof(u1);
}
}
static size_t max_size() { return sizeof(u1) + sizeof(u2); }
u1 tag() const { return *(u1*)tag_addr(); }
void set_tag(u1 tag) { *((u1*)tag_addr()) = tag; }
bool is_object() const { return tag() == ITEM_Object; }
bool is_uninitialized() const { return tag() == ITEM_Uninitialized; }
u2 cpool_index() const {
assert(is_object(), "This type has no cp_index");
return Bytes::get_Java_u2(cpool_index_addr());
}
void set_cpool_index(u2 idx) {
assert(is_object(), "This type has no cp_index");
Bytes::put_Java_u2(cpool_index_addr(), idx);
}
u2 bci() const {
assert(is_uninitialized(), "This type has no bci");
return Bytes::get_Java_u2(bci_addr());
}
void set_bci(u2 bci) {
assert(is_uninitialized(), "This type has no bci");
Bytes::put_Java_u2(bci_addr(), bci);
}
void copy_from(verification_type_info* from) {
set_tag(from->tag());
if (from->is_object()) {
set_cpool_index(from->cpool_index());
} else if (from->is_uninitialized()) {
set_bci(from->bci());
}
}
size_t size() const {
return calculate_size(tag());
}
verification_type_info* next() {
return (verification_type_info*)((address)this + size());
}
// This method is used when reading unverified data in order to ensure
// that we don't read past a particular memory limit. It returns false
// if any part of the data structure is outside the specified memory bounds.
bool verify(address start, address end) {
return ((address)this >= start &&
(address)this < end &&
            (bci_addr() + sizeof(u2) <= end ||
             (!is_object() && !is_uninitialized())));
}
#ifdef ASSERT
void print_on(outputStream* st) {
switch (tag()) {
case ITEM_Top: st->print("Top"); break;
case ITEM_Integer: st->print("Integer"); break;
case ITEM_Float: st->print("Float"); break;
case ITEM_Double: st->print("Double"); break;
case ITEM_Long: st->print("Long"); break;
case ITEM_Null: st->print("Null"); break;
case ITEM_UninitializedThis:
st->print("UninitializedThis"); break;
case ITEM_Uninitialized:
st->print("Uninitialized[#%d]", bci()); break;
case ITEM_Object:
st->print("Object[#%d]", cpool_index()); break;
default:
assert(false, "Bad verification_type_info");
}
}
#endif
};
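Each verification_type_info is a raw byte sequence: a one-byte tag, optionally followed by a big-endian u2 (cpool_index for ITEM_Object, bci for ITEM_Uninitialized). A small sketch of that encoding, with helpers standing in for Bytes::get_Java_u2/put_Java_u2 and an assumed numeric value for the ITEM_Object tag:

    #include <cassert>
    #include <cstdint>

    enum { ITEM_Object = 7 };  // assumed JVMS tag value

    static void put_u2(uint8_t* p, uint16_t v) { p[0] = (uint8_t)(v >> 8); p[1] = (uint8_t)v; }
    static uint16_t get_u2(const uint8_t* p) { return (uint16_t)((p[0] << 8) | p[1]); }

    int main() {
      uint8_t buf[3];
      buf[0] = ITEM_Object;   // tag
      put_u2(buf + 1, 42);    // cpool_index, big-endian
      assert(buf[0] == ITEM_Object && get_u2(buf + 1) == 42);
      return 0;
    }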
#define FOR_EACH_STACKMAP_FRAME_TYPE(macro, arg1, arg2) \
macro(same_frame, arg1, arg2) \
macro(same_frame_extended, arg1, arg2) \
macro(same_frame_1_stack_item_frame, arg1, arg2) \
macro(same_frame_1_stack_item_extended, arg1, arg2) \
macro(chop_frame, arg1, arg2) \
macro(append_frame, arg1, arg2) \
macro(full_frame, arg1, arg2)
#define SM_FORWARD_DECL(type, arg1, arg2) class type;
FOR_EACH_STACKMAP_FRAME_TYPE(SM_FORWARD_DECL, x, x)
#undef SM_FORWARD_DECL
class stack_map_frame {
protected:
address frame_type_addr() const { return (address)this; }
// No constructors - should be 'private', but GCC issues a warning if it is
stack_map_frame() {}
stack_map_frame(const stack_map_frame&) {}
public:
static stack_map_frame* at(address addr) {
return (stack_map_frame*)addr;
}
stack_map_frame* next() const {
return at((address)this + size());
}
u1 frame_type() const { return *(u1*)frame_type_addr(); }
void set_frame_type(u1 type) { *((u1*)frame_type_addr()) = type; }
// pseudo-virtual methods
inline size_t size() const;
inline int offset_delta() const;
inline void set_offset_delta(int offset_delta);
inline int number_of_types() const; // number of types contained in the frame
inline verification_type_info* types() const; // pointer to first type
inline bool is_valid_offset(int offset_delta) const;
// This method must be used when reading unverified data in order to ensure
// that we don't read past a particular memory limit. It returns false
// if any part of the data structure is outside the specified memory bounds.
inline bool verify(address start, address end) const;
#ifdef ASSERT
inline void print_on(outputStream* st) const;
#endif
// Create as_xxx and is_xxx methods for the subtypes
#define FRAME_TYPE_DECL(stackmap_frame_type, arg1, arg2) \
inline stackmap_frame_type* as_##stackmap_frame_type() const; \
bool is_##stackmap_frame_type() { \
return as_##stackmap_frame_type() != NULL; \
}
FOR_EACH_STACKMAP_FRAME_TYPE(FRAME_TYPE_DECL, x, x)
#undef FRAME_TYPE_DECL
};
class same_frame : public stack_map_frame {
private:
static int frame_type_to_offset_delta(u1 frame_type) {
return frame_type + 1; }
static u1 offset_delta_to_frame_type(int offset_delta) {
return (u1)(offset_delta - 1); }
public:
static bool is_frame_type(u1 tag) {
return tag < 64;
}
static same_frame* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (same_frame*)addr;
}
static same_frame* create_at(address addr, int offset_delta) {
same_frame* sm = (same_frame*)addr;
sm->set_offset_delta(offset_delta);
return sm;
}
static size_t calculate_size() { return sizeof(u1); }
size_t size() const { return calculate_size(); }
int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
void set_offset_delta(int offset_delta) {
assert(offset_delta <= 64, "Offset too large for same_frame");
set_frame_type(offset_delta_to_frame_type(offset_delta));
}
int number_of_types() const { return 0; }
verification_type_info* types() const { return NULL; }
bool is_valid_offset(int offset_delta) const {
return is_frame_type(offset_delta_to_frame_type(offset_delta));
}
bool verify_subtype(address start, address end) const {
return true;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame(%d)", offset_delta());
}
#endif
};
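The same_frame encoding stores offset_delta - 1 directly in the frame-type byte, which is why tags 0..63 cover deltas 1..64. A round-trip check of that mapping:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int offset_delta = 1; offset_delta <= 64; offset_delta++) {
        uint8_t frame_type = (uint8_t)(offset_delta - 1);  // offset_delta_to_frame_type
        assert(frame_type < 64);                           // is_frame_type
        assert(frame_type + 1 == offset_delta);            // frame_type_to_offset_delta
      }
      return 0;
    }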
class same_frame_extended : public stack_map_frame {
private:
enum { _frame_id = 251 };
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
public:
static bool is_frame_type(u1 tag) {
return tag == _frame_id;
}
static same_frame_extended* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame type");
return (same_frame_extended*)addr;
}
static same_frame_extended* create_at(address addr, u2 offset_delta) {
same_frame_extended* sm = (same_frame_extended*)addr;
sm->set_frame_type(_frame_id);
sm->set_offset_delta(offset_delta);
return sm;
}
static size_t calculate_size() { return sizeof(u1) + sizeof(u2); }
size_t size() const { return calculate_size(); }
int offset_delta() const {
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
}
void set_offset_delta(int offset_delta) {
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
}
int number_of_types() const { return 0; }
verification_type_info* types() const { return NULL; }
bool is_valid_offset(int offset) const { return true; }
bool verify_subtype(address start, address end) const {
return frame_type_addr() + size() <= end;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame_extended(%d)", offset_delta());
}
#endif
};
class same_frame_1_stack_item_frame : public stack_map_frame {
private:
address type_addr() const { return frame_type_addr() + sizeof(u1); }
static int frame_type_to_offset_delta(u1 frame_type) {
return frame_type - 63; }
static u1 offset_delta_to_frame_type(int offset_delta) {
return (u1)(offset_delta + 63); }
public:
static bool is_frame_type(u1 tag) {
return tag >= 64 && tag < 128;
}
static same_frame_1_stack_item_frame* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (same_frame_1_stack_item_frame*)addr;
}
static same_frame_1_stack_item_frame* create_at(
address addr, int offset_delta, verification_type_info* vti) {
same_frame_1_stack_item_frame* sm = (same_frame_1_stack_item_frame*)addr;
sm->set_offset_delta(offset_delta);
if (vti != NULL) {
sm->set_type(vti);
}
return sm;
}
static size_t calculate_size(verification_type_info* vti) {
return sizeof(u1) + vti->size();
}
static size_t max_size() {
return sizeof(u1) + verification_type_info::max_size();
}
size_t size() const { return calculate_size(types()); }
int offset_delta() const { return frame_type_to_offset_delta(frame_type()); }
void set_offset_delta(int offset_delta) {
assert(offset_delta > 0 && offset_delta <= 64,
"Offset too large for this frame type");
set_frame_type(offset_delta_to_frame_type(offset_delta));
}
void set_type(verification_type_info* vti) {
verification_type_info* cur = types();
cur->copy_from(vti);
}
int number_of_types() const { return 1; }
verification_type_info* types() const {
return verification_type_info::at(type_addr());
}
bool is_valid_offset(int offset_delta) const {
return is_frame_type(offset_delta_to_frame_type(offset_delta));
}
bool verify_subtype(address start, address end) const {
return types()->verify(start, end);
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame_1_stack_item_frame(%d,", offset_delta());
types()->print_on(st);
st->print(")");
}
#endif
};
class same_frame_1_stack_item_extended : public stack_map_frame {
private:
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
address type_addr() const { return offset_delta_addr() + sizeof(u2); }
enum { _frame_id = 247 };
public:
static bool is_frame_type(u1 tag) {
return tag == _frame_id;
}
static same_frame_1_stack_item_extended* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (same_frame_1_stack_item_extended*)addr;
}
static same_frame_1_stack_item_extended* create_at(
address addr, int offset_delta, verification_type_info* vti) {
same_frame_1_stack_item_extended* sm =
(same_frame_1_stack_item_extended*)addr;
sm->set_frame_type(_frame_id);
sm->set_offset_delta(offset_delta);
if (vti != NULL) {
sm->set_type(vti);
}
return sm;
}
static size_t calculate_size(verification_type_info* vti) {
return sizeof(u1) + sizeof(u2) + vti->size();
}
size_t size() const { return calculate_size(types()); }
int offset_delta() const {
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
}
void set_offset_delta(int offset_delta) {
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
}
void set_type(verification_type_info* vti) {
verification_type_info* cur = types();
cur->copy_from(vti);
}
int number_of_types() const { return 1; }
verification_type_info* types() const {
return verification_type_info::at(type_addr());
}
bool is_valid_offset(int offset) { return true; }
bool verify_subtype(address start, address end) const {
return type_addr() < end && types()->verify(start, end);
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("same_frame_1_stack_item_extended(%d,", offset_delta());
types()->print_on(st);
st->print(")");
}
#endif
};
class chop_frame : public stack_map_frame {
private:
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
static int frame_type_to_chops(u1 frame_type) {
int chop = 251 - frame_type;
return chop;
}
static u1 chops_to_frame_type(int chop) {
return 251 - chop;
}
public:
static bool is_frame_type(u1 tag) {
return frame_type_to_chops(tag) > 0 && frame_type_to_chops(tag) < 4;
}
static chop_frame* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (chop_frame*)addr;
}
static chop_frame* create_at(address addr, int offset_delta, int chops) {
chop_frame* sm = (chop_frame*)addr;
sm->set_chops(chops);
sm->set_offset_delta(offset_delta);
return sm;
}
static size_t calculate_size() {
return sizeof(u1) + sizeof(u2);
}
size_t size() const { return calculate_size(); }
int offset_delta() const {
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
}
void set_offset_delta(int offset_delta) {
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
}
int chops() const {
int chops = frame_type_to_chops(frame_type());
assert(chops > 0 && chops < 4, "Invalid number of chops in frame");
return chops;
}
void set_chops(int chops) {
assert(chops > 0 && chops <= 3, "Bad number of chops");
set_frame_type(chops_to_frame_type(chops));
}
int number_of_types() const { return 0; }
verification_type_info* types() const { return NULL; }
bool is_valid_offset(int offset) { return true; }
bool verify_subtype(address start, address end) const {
return frame_type_addr() + size() <= end;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("chop_frame(%d,%d)", offset_delta(), chops());
}
#endif
};
class append_frame : public stack_map_frame {
private:
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
address types_addr() const { return offset_delta_addr() + sizeof(u2); }
static int frame_type_to_appends(u1 frame_type) {
int append = frame_type - 251;
return append;
}
static u1 appends_to_frame_type(int appends) {
assert(appends > 0 && appends < 4, "Invalid append amount");
return 251 + appends;
}
public:
static bool is_frame_type(u1 tag) {
return frame_type_to_appends(tag) > 0 && frame_type_to_appends(tag) < 4;
}
static append_frame* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (append_frame*)addr;
}
static append_frame* create_at(
address addr, int offset_delta, int appends,
verification_type_info* types) {
append_frame* sm = (append_frame*)addr;
sm->set_appends(appends);
sm->set_offset_delta(offset_delta);
if (types != NULL) {
verification_type_info* cur = sm->types();
for (int i = 0; i < appends; ++i) {
cur->copy_from(types);
cur = cur->next();
types = types->next();
}
}
return sm;
}
static size_t calculate_size(int appends, verification_type_info* types) {
size_t sz = sizeof(u1) + sizeof(u2);
for (int i = 0; i < appends; ++i) {
sz += types->size();
types = types->next();
}
return sz;
}
static size_t max_size() {
return sizeof(u1) + sizeof(u2) + 3 * verification_type_info::max_size();
}
size_t size() const { return calculate_size(number_of_types(), types()); }
int offset_delta() const {
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
}
void set_offset_delta(int offset_delta) {
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
}
void set_appends(int appends) {
assert(appends > 0 && appends < 4, "Bad number of appends");
set_frame_type(appends_to_frame_type(appends));
}
int number_of_types() const {
int appends = frame_type_to_appends(frame_type());
assert(appends > 0 && appends < 4, "Invalid number of appends in frame");
return appends;
}
verification_type_info* types() const {
return verification_type_info::at(types_addr());
}
bool is_valid_offset(int offset) const { return true; }
bool verify_subtype(address start, address end) const {
verification_type_info* vti = types();
if ((address)vti < end && vti->verify(start, end)) {
int nof = number_of_types();
vti = vti->next();
if (nof < 2 || vti->verify(start, end)) {
vti = vti->next();
if (nof < 3 || vti->verify(start, end)) {
return true;
}
}
}
return false;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("append_frame(%d,", offset_delta());
verification_type_info* vti = types();
for (int i = 0; i < number_of_types(); ++i) {
vti->print_on(st);
if (i != number_of_types() - 1) {
st->print(",");
}
vti = vti->next();
}
st->print(")");
}
#endif
};
class full_frame : public stack_map_frame {
private:
address offset_delta_addr() const { return frame_type_addr() + sizeof(u1); }
address num_locals_addr() const { return offset_delta_addr() + sizeof(u2); }
address locals_addr() const { return num_locals_addr() + sizeof(u2); }
address stack_slots_addr(address end_of_locals) const {
return end_of_locals; }
address stack_addr(address end_of_locals) const {
return stack_slots_addr(end_of_locals) + sizeof(u2); }
enum { _frame_id = 255 };
public:
static bool is_frame_type(u1 tag) {
return tag == _frame_id;
}
static full_frame* at(address addr) {
assert(is_frame_type(*addr), "Wrong frame id");
return (full_frame*)addr;
}
static full_frame* create_at(
address addr, int offset_delta, int num_locals,
verification_type_info* locals,
int stack_slots, verification_type_info* stack) {
full_frame* sm = (full_frame*)addr;
sm->set_frame_type(_frame_id);
sm->set_offset_delta(offset_delta);
sm->set_num_locals(num_locals);
if (locals != NULL) {
verification_type_info* cur = sm->locals();
for (int i = 0; i < num_locals; ++i) {
cur->copy_from(locals);
cur = cur->next();
locals = locals->next();
}
address end_of_locals = (address)cur;
sm->set_stack_slots(end_of_locals, stack_slots);
cur = sm->stack(end_of_locals);
for (int i = 0; i < stack_slots; ++i) {
cur->copy_from(stack);
cur = cur->next();
stack = stack->next();
}
}
return sm;
}
static size_t calculate_size(
int num_locals, verification_type_info* locals,
int stack_slots, verification_type_info* stack) {
size_t sz = sizeof(u1) + sizeof(u2) + sizeof(u2) + sizeof(u2);
verification_type_info* vti = locals;
for (int i = 0; i < num_locals; ++i) {
sz += vti->size();
vti = vti->next();
}
vti = stack;
for (int i = 0; i < stack_slots; ++i) {
sz += vti->size();
vti = vti->next();
}
return sz;
}
static size_t max_size(int locals, int stack) {
return sizeof(u1) + 3 * sizeof(u2) +
(locals + stack) * verification_type_info::max_size();
}
size_t size() const {
address eol = end_of_locals();
return calculate_size(num_locals(), locals(), stack_slots(eol), stack(eol));
}
int offset_delta() const {
return Bytes::get_Java_u2(offset_delta_addr()) + 1;
}
int num_locals() const { return Bytes::get_Java_u2(num_locals_addr()); }
verification_type_info* locals() const {
return verification_type_info::at(locals_addr());
}
address end_of_locals() const {
verification_type_info* vti = locals();
for (int i = 0; i < num_locals(); ++i) {
vti = vti->next();
}
return (address)vti;
}
int stack_slots(address end_of_locals) const {
return Bytes::get_Java_u2(stack_slots_addr(end_of_locals));
}
verification_type_info* stack(address end_of_locals) const {
return verification_type_info::at(stack_addr(end_of_locals));
}
void set_offset_delta(int offset_delta) {
Bytes::put_Java_u2(offset_delta_addr(), offset_delta - 1);
}
void set_num_locals(int num_locals) {
Bytes::put_Java_u2(num_locals_addr(), num_locals);
}
void set_stack_slots(address end_of_locals, int stack_slots) {
Bytes::put_Java_u2(stack_slots_addr(end_of_locals), stack_slots);
}
// These return only the locals. Extra processing is required for stack
// types of full frames.
int number_of_types() const { return num_locals(); }
verification_type_info* types() const { return locals(); }
bool is_valid_offset(int offset) { return true; }
bool verify_subtype(address start, address end) const {
verification_type_info* vti = types();
if ((address)vti >= end) {
return false;
}
int count = number_of_types();
for (int i = 0; i < count; ++i) {
if (!vti->verify(start, end)) {
return false;
}
vti = vti->next();
}
address eol = (address)vti;
if (eol + sizeof(u2) > end) {
return false;
}
count = stack_slots(eol);
vti = stack(eol);
for (int i = 0; i < stack_slots(eol); ++i) {
if (!vti->verify(start, end)) {
return false;
}
vti = vti->next();
}
return true;
}
#ifdef ASSERT
void print_on(outputStream* st) const {
st->print("full_frame(%d,{", offset_delta());
verification_type_info* vti = locals();
for (int i = 0; i < num_locals(); ++i) {
vti->print_on(st);
if (i != num_locals() - 1) {
st->print(",");
}
vti = vti->next();
}
st->print("},{");
address end_of_locals = (address)vti;
vti = stack(end_of_locals);
int ss = stack_slots(end_of_locals);
for (int i = 0; i < ss; ++i) {
vti->print_on(st);
if (i != ss - 1) {
st->print(",");
}
vti = vti->next();
}
st->print("})");
}
#endif
};
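Taken together, the is_frame_type predicates above partition the frame-type byte; tags 128..246 are reserved. A sketch of the dispatch that the as_xxx casts implement:

    #include <cstdio>
    #include <cstdint>

    const char* frame_kind(uint8_t tag) {
      if (tag < 64)                 return "same_frame";
      if (tag < 128)                return "same_frame_1_stack_item_frame";
      if (tag == 247)               return "same_frame_1_stack_item_extended";
      if (tag >= 248 && tag <= 250) return "chop_frame";           // 251 - tag chops
      if (tag == 251)               return "same_frame_extended";
      if (tag >= 252 && tag <= 254) return "append_frame";         // tag - 251 appends
      if (tag == 255)               return "full_frame";
      return "reserved";
    }

    int main() {
      printf("%s\n", frame_kind(63));   // same_frame with offset_delta 64
      printf("%s\n", frame_kind(252));  // append_frame adding one local
      return 0;
    }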
#define VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
if (item_##stack_frame_type != NULL) { \
return item_##stack_frame_type->func_name args; \
}
#define VOID_VIRTUAL_DISPATCH(stack_frame_type, func_name, args) \
stack_frame_type* item_##stack_frame_type = as_##stack_frame_type(); \
if (item_##stack_frame_type != NULL) { \
item_##stack_frame_type->func_name args; \
return; \
}
size_t stack_map_frame::size() const {
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, size, ());
return 0;
}
int stack_map_frame::offset_delta() const {
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, offset_delta, ());
return 0;
}
void stack_map_frame::set_offset_delta(int offset_delta) {
FOR_EACH_STACKMAP_FRAME_TYPE(
VOID_VIRTUAL_DISPATCH, set_offset_delta, (offset_delta));
}
int stack_map_frame::number_of_types() const {
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, number_of_types, ());
return 0;
}
verification_type_info* stack_map_frame::types() const {
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, types, ());
return NULL;
}
bool stack_map_frame::is_valid_offset(int offset) const {
FOR_EACH_STACKMAP_FRAME_TYPE(VIRTUAL_DISPATCH, is_valid_offset, (offset));
return true;
}
bool stack_map_frame::verify(address start, address end) const {
if (frame_type_addr() >= start && frame_type_addr() < end) {
FOR_EACH_STACKMAP_FRAME_TYPE(
VIRTUAL_DISPATCH, verify_subtype, (start, end));
}
return false;
}
#ifdef ASSERT
void stack_map_frame::print_on(outputStream* st) const {
FOR_EACH_STACKMAP_FRAME_TYPE(VOID_VIRTUAL_DISPATCH, print_on, (st));
}
#endif
#undef VIRTUAL_DISPATCH
#undef VOID_VIRTUAL_DISPATCH
#define AS_SUBTYPE_DEF(stack_frame_type, arg1, arg2) \
stack_frame_type* stack_map_frame::as_##stack_frame_type() const { \
if (stack_frame_type::is_frame_type(frame_type())) { \
return (stack_frame_type*)this; \
} else { \
return NULL; \
} \
}
FOR_EACH_STACKMAP_FRAME_TYPE(AS_SUBTYPE_DEF, x, x)
#undef AS_SUBTYPE_DEF
class stack_map_table_attribute {
private:
address name_index_addr() const {
return (address)this; }
address attribute_length_addr() const {
return name_index_addr() + sizeof(u2); }
address number_of_entries_addr() const {
return attribute_length_addr() + sizeof(u4); }
address entries_addr() const {
return number_of_entries_addr() + sizeof(u2); }
protected:
// No constructors - should be 'private', but GCC issues a warning if it is
stack_map_table_attribute() {}
stack_map_table_attribute(const stack_map_table_attribute&) {}
public:
static stack_map_table_attribute* at(address addr) {
return (stack_map_table_attribute*)addr;
}
u2 name_index() const {
return Bytes::get_Java_u2(name_index_addr()); }
u4 attribute_length() const {
return Bytes::get_Java_u4(attribute_length_addr()); }
u2 number_of_entries() const {
return Bytes::get_Java_u2(number_of_entries_addr()); }
stack_map_frame* entries() const {
return stack_map_frame::at(entries_addr());
}
static size_t header_size() {
return sizeof(u2) + sizeof(u4);
}
void set_name_index(u2 idx) {
Bytes::put_Java_u2(name_index_addr(), idx);
}
void set_attribute_length(u4 len) {
Bytes::put_Java_u4(attribute_length_addr(), len);
}
void set_number_of_entries(u2 num) {
Bytes::put_Java_u2(number_of_entries_addr(), num);
}
};
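The attribute itself is again raw big-endian bytes: u2 name_index, u4 attribute_length, u2 number_of_entries, then the frames. A sketch decoding that header from a byte buffer (the field values here are invented):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t hdr[8] = { 0x00, 0x0B,              // name_index = 11
                         0x00, 0x00, 0x00, 0x03,  // attribute_length = 3
                         0x00, 0x01 };            // number_of_entries = 1
      uint16_t name_index = (uint16_t)((hdr[0] << 8) | hdr[1]);
      uint32_t attr_len   = ((uint32_t)hdr[2] << 24) | ((uint32_t)hdr[3] << 16) |
                            ((uint32_t)hdr[4] << 8) | hdr[5];
      uint16_t entries    = (uint16_t)((hdr[6] << 8) | hdr[7]);
      assert(name_index == 11 && attr_len == 3 && entries == 1);
      return 0;
    }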

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,12 +26,12 @@
# include "incls/_systemDictionary.cpp.incl"
Dictionary* SystemDictionary::_dictionary = NULL;
PlaceholderTable* SystemDictionary::_placeholders = NULL;
Dictionary* SystemDictionary::_shared_dictionary = NULL;
LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL;
ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
Dictionary* SystemDictionary::_dictionary = NULL;
PlaceholderTable* SystemDictionary::_placeholders = NULL;
Dictionary* SystemDictionary::_shared_dictionary = NULL;
LoaderConstraintTable* SystemDictionary::_loader_constraints = NULL;
ResolutionErrorTable* SystemDictionary::_resolution_errors = NULL;
SymbolPropertyTable* SystemDictionary::_invoke_method_table = NULL;
int SystemDictionary::_number_of_modifications = 0;
@ -1727,8 +1727,7 @@ void SystemDictionary::always_strong_classes_do(OopClosure* blk) {
placeholders_do(blk);
// Visit extra methods
invoke_method_table()->oops_do(blk);
// Loader constraints. We must keep the symbolOop used in the name alive.
constraints()->always_strong_classes_do(blk);
@ -1766,8 +1765,7 @@ void SystemDictionary::oops_do(OopClosure* f) {
dictionary()->oops_do(f);
// Visit extra methods
invoke_method_table()->oops_do(f);
// Partially loaded classes
placeholders()->oops_do(f);
@ -1841,8 +1839,7 @@ void SystemDictionary::placeholders_do(void f(symbolOop, oop)) {
void SystemDictionary::methods_do(void f(methodOop)) {
dictionary()->methods_do(f);
invoke_method_table()->methods_do(f);
}
// ----------------------------------------------------------------------------
@ -1870,12 +1867,12 @@ void SystemDictionary::initialize(TRAPS) {
// Allocate arrays
assert(dictionary() == NULL,
"SystemDictionary should only be initialized once");
_dictionary = new Dictionary(_nof_buckets);
_placeholders = new PlaceholderTable(_nof_buckets);
_number_of_modifications = 0;
_loader_constraints = new LoaderConstraintTable(_loader_constraint_size);
_resolution_errors = new ResolutionErrorTable(_resolution_error_size);
_invoke_method_table = new SymbolPropertyTable(_invoke_method_size);
// Allocate private object used as system class loader lock
_system_loader_lock_obj = oopFactory::new_system_objArray(0, CHECK);
@ -2346,10 +2343,6 @@ methodOop SystemDictionary::find_method_handle_invoke(symbolHandle name,
KlassHandle accessing_klass,
TRAPS) {
if (!EnableMethodHandles) return NULL;
vmSymbols::SID name_id = vmSymbols::find_sid(name());
assert(name_id != vmSymbols::NO_SID, "must be a known name");
unsigned int hash = invoke_method_table()->compute_hash(signature, name_id);
@ -2562,7 +2555,9 @@ Handle SystemDictionary::make_dynamic_call_site(Handle bootstrap_method,
}
Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int caller_bci,
int cache_index,
Handle& argument_info_result,
TRAPS) {
Handle empty;
constantPoolHandle pool;
@ -2576,7 +2571,7 @@ Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int c
constantTag tag = pool->tag_at(constant_pool_index);
if (tag.is_invoke_dynamic()) {
// JVM_CONSTANT_InvokeDynamic is an ordered pair of [bootm, name&type], plus optional arguments
// The bootm, being a JVM_CONSTANT_MethodHandle, has its own cache entry.
int bsm_index = pool->invoke_dynamic_bootstrap_method_ref_index_at(constant_pool_index);
if (bsm_index != 0) {
@ -2592,9 +2587,38 @@ Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int c
tty->print_cr("bootstrap method for "PTR_FORMAT" at %d retrieved as "PTR_FORMAT":",
(intptr_t) caller_method(), caller_bci, (intptr_t) bsm_oop);
}
assert(bsm_oop->is_oop(), "must be sane");
// caller must verify that it is of type MethodHandle
Handle bsm(THREAD, bsm_oop);
bsm_oop = NULL; // safety
// Extract the optional static arguments.
Handle argument_info; // either null, or one arg, or Object[]{arg...}
int argc = pool->invoke_dynamic_argument_count_at(constant_pool_index);
if (TraceInvokeDynamic) {
tty->print_cr("find_bootstrap_method: [%d/%d] CONSTANT_InvokeDynamic: %d[%d]",
constant_pool_index, cache_index, bsm_index, argc);
}
if (argc > 0) {
objArrayHandle arg_array;
if (argc > 1) {
objArrayOop arg_array_oop = oopFactory::new_objArray(SystemDictionary::Object_klass(), argc, CHECK_(empty));
arg_array = objArrayHandle(THREAD, arg_array_oop);
argument_info = arg_array;
}
for (int arg_i = 0; arg_i < argc; arg_i++) {
int arg_index = pool->invoke_dynamic_argument_index_at(constant_pool_index, arg_i);
oop arg_oop = pool->resolve_possibly_cached_constant_at(arg_index, CHECK_(empty));
if (arg_array.is_null()) {
argument_info = Handle(THREAD, arg_oop);
} else {
arg_array->obj_at_put(arg_i, arg_oop);
}
}
}
argument_info_result = argument_info; // return argument_info to caller
return bsm;
}
// else null BSM; fall through
} else if (tag.is_name_and_type()) {
@ -2607,14 +2631,14 @@ Handle SystemDictionary::find_bootstrap_method(methodHandle caller_method, int c
// Fall through to pick up the per-class bootstrap method.
// This mechanism may go away in the PFD.
assert(AllowTransitionalJSR292, "else the verifier should have stopped us already");
argument_info_result = empty; // return no argument_info to caller
oop bsm_oop = instanceKlass::cast(caller_method->method_holder())->bootstrap_method();
if (bsm_oop != NULL) {
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" registered as "PTR_FORMAT":",
(intptr_t) caller_method(), (intptr_t) bsm_oop);
}
assert(bsm_oop->is_oop(), "must be sane");
return Handle(THREAD, bsm_oop);
}
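The argument_info handle returned above follows a three-way convention: null when there are no static arguments, the lone argument itself when argc == 1, and an Object[] wrapping them when argc > 1. A toy illustration of that packing rule, with placeholder types and values:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> args = { "arg0", "arg1" };
      if (args.empty()) {
        printf("argument_info = null\n");
      } else if (args.size() == 1) {
        printf("argument_info = %s (the single argument itself)\n", args[0].c_str());
      } else {
        printf("argument_info = Object[%zu]{...}\n", args.size());
      }
      return 0;
    }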

View File

@ -496,6 +496,7 @@ public:
static Handle find_bootstrap_method(methodHandle caller_method,
int caller_bci, // N.B. must be an invokedynamic
int cache_index, // must be corresponding main_entry
Handle &argument_info_result, // static BSM arguments, if any
TRAPS);
// Utility for printing loader "name" as part of tracing constraints

View File

@ -1909,7 +1909,7 @@ void ClassVerifier::verify_invoke_instructions(
unsigned int types = (opcode == Bytecodes::_invokeinterface
? 1 << JVM_CONSTANT_InterfaceMethodref
: opcode == Bytecodes::_invokedynamic
? ((AllowTransitionalJSR292 ? 1 << JVM_CONSTANT_NameAndType : 0)
|1 << JVM_CONSTANT_InvokeDynamic)
: 1 << JVM_CONSTANT_Methodref);
verify_cp_type(index, cp, types, CHECK_VERIFY(this));
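verify_cp_type works against a bitmask with one bit per permitted constant-pool tag, so the change above simply drops JVM_CONSTANT_NameAndType from the invokedynamic mask unless the transitional flag is on. A sketch of the mask check; the tag values are assumptions based on the usual JVM constant numbering:

    #include <cassert>

    enum { JVM_CONSTANT_Methodref = 10, JVM_CONSTANT_NameAndType = 12,
           JVM_CONSTANT_InvokeDynamic = 18 };  // assumed tag values

    int main() {
      bool AllowTransitionalJSR292 = false;
      unsigned types = (AllowTransitionalJSR292 ? 1u << JVM_CONSTANT_NameAndType : 0)
                     | 1u << JVM_CONSTANT_InvokeDynamic;
      assert(((1u << JVM_CONSTANT_InvokeDynamic) & types) != 0);  // accepted
      assert(((1u << JVM_CONSTANT_NameAndType) & types) == 0);    // rejected
      return 0;
    }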

View File

@ -914,3 +914,14 @@ void CodeCache::print() {
}
#endif // PRODUCT
void CodeCache::print_bounds(outputStream* st) {
st->print_cr("Code Cache [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
_heap->low_boundary(),
_heap->high(),
_heap->high_boundary());
st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
" adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT,
CodeCache::nof_blobs(), CodeCache::nof_nmethods(),
CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
}

View File

@ -137,6 +137,7 @@ class CodeCache : AllStatic {
static void print_internals();
static void verify(); // verifies the code cache
static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
static void print_bounds(outputStream* st); // Prints a summary of the bounds of the code cache
// The full limits of the codeCache
static address low_bound() { return (address) _heap->low_boundary(); }

View File

@ -354,12 +354,8 @@ void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
double CMSStats::time_until_cms_gen_full() const {
size_t cms_free = _cms_gen->cmsSpace()->free();
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
(size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
if (cms_free > expected_promotion) {
// Start a cms collection if there isn't enough space to promote
// for the next minor collection. Use the padded average as
@ -865,57 +861,18 @@ size_t ConcurrentMarkSweepGeneration::max_available() const {
return free() + _virtual_space.uncommitted_size();
}
bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}
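The rewritten test reduces to one comparison pair: the attempt is safe if free old-gen space can absorb either the historical padded-average promotion or the worst case passed in by the caller. A standalone illustration with made-up sizes:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t available = 64u << 20;  // max_available(): free + uncommitted (hypothetical)
      size_t av_promo  = 20u << 20;  // padded average of past promotions
      size_t max_promo = 96u << 20;  // worst-case promotion size (hypothetical)
      bool res = (available >= av_promo) || (available >= max_promo);
      printf("CMS: promo attempt is%s safe\n", res ? "" : " not");
      return 0;
    }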
// At a promotion failure dump information on block layout in heap
@ -6091,23 +6048,14 @@ void CMSCollector::sweep(bool asynch) {
assert(_collectorState == Resizing, "Change of collector state to"
" Resizing must be done under the freelistLocks (plural)");
  // Now that sweeping has been completed, we clear
  // the incremental_collection_failed flag,
  // thus inviting a younger gen collection to promote into
  // this generation. If such a promotion may still fail,
  // the flag will be set again when a young collection is
  // attempted.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
gch->update_full_collections_completed(_collection_count_start);
}

View File

@ -1185,8 +1185,7 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
virtual void par_promote_alloc_done(int thread_num);
virtual void par_oop_since_save_marks_iterate_done(int thread_num);
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
// Inform this (non-young) generation that a promotion failure was
// encountered during a collection of a younger generation that

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -272,12 +272,16 @@ void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
}
}
// Wait until the next synchronous GC, a concurrent full gc request,
// or a timeout, whichever is earlier.
void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
MutexLockerEx x(CGC_lock,
Mutex::_no_safepoint_check_flag);
if (_should_terminate || _collector->_full_gc_requested) {
return;
}
set_CMS_flag(CMS_cms_wants_token); // to provoke notifies
CGC_lock->wait(Mutex::_no_safepoint_check_flag, t_millis);
clear_CMS_flag(CMS_cms_wants_token);
assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
"Should not be set");
@ -289,7 +293,8 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
icms_wait();
return;
} else {
// Wait until the next synchronous GC, a concurrent full gc
// request or a timeout, whichever is earlier.
wait_on_cms_lock(CMSWaitDuration);
}
// Check if we should start a CMS collection cycle

View File

@ -120,8 +120,10 @@ class ConcurrentMarkSweepThread: public ConcurrentGCThread {
}
// Wait on CMS lock until the next synchronous GC
// or given timeout, whichever is earlier. A timeout value
// of 0 indicates that there is no upper bound on the wait time.
// A concurrent full gc request terminates the wait.
void wait_on_cms_lock(long t_millis);
// The CMS thread will yield during the work portion of its cycle
// only when requested to. Both synchronous and asynchronous requests

View File

@ -2418,6 +2418,8 @@ void ConcurrentMark::clear_marking_state() {
for (int i = 0; i < (int)_max_task_num; ++i) {
OopTaskQueue* queue = _task_queues->queue(i);
queue->set_empty();
// Clear any partial regions from the CMTasks
_tasks[i]->clear_aborted_region();
}
}
@ -2706,7 +2708,6 @@ void ConcurrentMark::abort() {
clear_marking_state();
for (int i = 0; i < (int)_max_task_num; ++i) {
_tasks[i]->clear_region_fields();
}
_has_aborted = true;
@ -2985,7 +2986,7 @@ void CMTask::reset(CMBitMap* nextMarkBitMap) {
_nextMarkBitMap = nextMarkBitMap;
clear_region_fields();
assert(_aborted_region.is_empty(), "should have been cleared");
_calls = 0;
_elapsed_time_ms = 0.0;

View File

@ -175,7 +175,7 @@ G1BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size
}
assert(start_card > _array->index_for(_bottom), "Cannot be first card");
assert(_array->offset_array(start_card-1) <= N_words,
"Offset card has an unexpected value");
"Offset card has an unexpected value");
size_t start_card_for_region = start_card;
u_char offset = max_jubyte;
for (int i = 0; i < BlockOffsetArray::N_powers; i++) {
@ -577,6 +577,16 @@ void G1BlockOffsetArray::alloc_block_work2(HeapWord** threshold_, size_t* index_
#endif
}
void
G1BlockOffsetArray::set_for_starts_humongous(HeapWord* new_end) {
assert(_end == new_end, "_end should have already been updated");
// The first BOT entry should have offset 0.
_array->set_offset_array(_array->index_for(_bottom), 0);
// The rest should point to the first one.
set_remainder_to_point_to_start(_bottom + N_words, new_end);
}
//////////////////////////////////////////////////////////////////////
// G1BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////
@ -626,3 +636,12 @@ void G1BlockOffsetArrayContigSpace::zero_bottom_entry() {
"Precondition of call");
_array->set_offset_array(bottom_index, 0);
}
void
G1BlockOffsetArrayContigSpace::set_for_starts_humongous(HeapWord* new_end) {
G1BlockOffsetArray::set_for_starts_humongous(new_end);
// Make sure _next_offset_threshold and _next_offset_index point to new_end.
_next_offset_threshold = new_end;
_next_offset_index = _array->index_for(new_end);
}
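set_for_starts_humongous seeds the block-offset table so that the first card maps to offset 0 and every later card refers back toward the region start. A toy model of that lookup idea; the real BOT uses a logarithmic back-skip encoding, not the plain per-card distances used here:

    #include <cstdio>

    int main() {
      const int cards = 8;
      int back_skip[cards];
      back_skip[0] = 0;                       // object starts at card 0
      for (int i = 1; i < cards; i++) {
        back_skip[i] = i;                     // pretend: "i cards back to the start"
      }
      int card = 6;                           // block_start-style lookup
      while (back_skip[card] != 0) card -= back_skip[card];
      printf("block starts at card %d\n", card);
      return 0;
    }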

View File

@ -436,6 +436,8 @@ public:
}
void check_all_cards(size_t left_card, size_t right_card) const;
virtual void set_for_starts_humongous(HeapWord* new_end);
};
// A subtype of BlockOffsetArray that takes advantage of the fact
@ -484,4 +486,6 @@ class G1BlockOffsetArrayContigSpace: public G1BlockOffsetArray {
HeapWord* block_start_unsafe(const void* addr);
HeapWord* block_start_unsafe_const(const void* addr) const;
virtual void set_for_starts_humongous(HeapWord* new_end);
};

View File

@ -791,7 +791,7 @@ class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
int _worker_i;
public:
RebuildRSOutOfRegionClosure(G1CollectedHeap* g1, int worker_i = 0) :
_cl(g1->g1_rem_set(), worker_i),
_worker_i(worker_i),
_g1h(g1)
{ }
@ -890,7 +890,7 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
abandon_cur_alloc_region();
abandon_gc_alloc_regions();
assert(_cur_alloc_region == NULL, "Invariant.");
g1_rem_set()->cleanupHRRS();
tear_down_region_lists();
set_used_regions_to_need_zero_fill();
@ -1506,15 +1506,11 @@ jint G1CollectedHeap::initialize() {
}
// Also create a G1 rem set.
  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
  } else {
    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
    return JNI_ENOMEM;
  }
// Carve out the G1 part of the heap.
@ -2706,8 +2702,7 @@ size_t G1CollectedHeap::max_pending_card_num() {
}
size_t G1CollectedHeap::cards_scanned() {
return g1_rem_set()->cardsScanned();
}
void
@ -3850,6 +3845,54 @@ G1ParScanThreadState::print_termination_stats(int i,
undo_waste() * HeapWordSize / K);
}
#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
assert(ref != NULL, "invariant");
assert(UseCompressedOops, "sanity");
assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
oop p = oopDesc::load_decode_heap_oop(ref);
assert(_g1h->is_in_g1_reserved(p),
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
return true;
}
bool G1ParScanThreadState::verify_ref(oop* ref) const {
assert(ref != NULL, "invariant");
if (has_partial_array_mask(ref)) {
// Must be in the collection set--it's already been copied.
oop p = clear_partial_array_mask(ref);
assert(_g1h->obj_in_cs(p),
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
} else {
oop p = oopDesc::load_decode_heap_oop(ref);
assert(_g1h->is_in_g1_reserved(p),
err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
}
return true;
}
bool G1ParScanThreadState::verify_task(StarTask ref) const {
if (ref.is_narrow()) {
return verify_ref((narrowOop*) ref);
} else {
return verify_ref((oop*) ref);
}
}
#endif // ASSERT
void G1ParScanThreadState::trim_queue() {
StarTask ref;
do {
// Drain the overflow stack first, so other threads can steal.
while (refs()->pop_overflow(ref)) {
deal_with_reference(ref);
}
while (refs()->pop_local(ref)) {
deal_with_reference(ref);
}
} while (!refs()->is_empty());
}
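trim_queue drains the overflow list before the local queue so that other GC workers keep a chance to steal from the local end. A toy version of that two-phase drain over plain containers:

    #include <cstdio>
    #include <deque>
    #include <vector>

    int main() {
      std::vector<int> overflow = { 1, 2 };      // stand-in for the overflow stack
      std::deque<int>  local    = { 3, 4, 5 };   // stand-in for the stealable local queue
      while (!overflow.empty() || !local.empty()) {
        while (!overflow.empty()) { printf("overflow task %d\n", overflow.back()); overflow.pop_back(); }
        while (!local.empty())    { printf("local task %d\n",    local.back());    local.pop_back();    }
      }
      return 0;
    }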
G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
_g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
_par_scan_state(par_scan_state) { }
@ -4052,39 +4095,44 @@ public:
: _g1h(g1h), _par_scan_state(par_scan_state),
_queues(queues), _terminator(terminator) {}
  void do_void();

private:
  inline bool offer_termination();
};
bool G1ParEvacuateFollowersClosure::offer_termination() {
G1ParScanThreadState* const pss = par_scan_state();
pss->start_term_time();
const bool res = terminator()->offer_termination();
pss->end_term_time();
return res;
}
void G1ParEvacuateFollowersClosure::do_void() {
StarTask stolen_task;
G1ParScanThreadState* const pss = par_scan_state();
pss->trim_queue();
do {
while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
assert(pss->verify_task(stolen_task), "sanity");
if (stolen_task.is_narrow()) {
pss->deal_with_reference((narrowOop*) stolen_task);
} else {
pss->deal_with_reference((oop*) stolen_task);
}
// We've just processed a reference and we might have made
// available new entries on the queues. So we have to make sure
// we drain the queues as necessary.
pss->trim_queue();
}
} while (!offer_termination());
pss->retire_alloc_buffers();
}
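
The rewritten do_void() keeps a tight steal loop and pushes the termination protocol into offer_termination(). A single-threaded sketch of that control flow; the queue set and terminator here are toy stand-ins for the real work-stealing structures:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Worker {
  std::vector<int> queue;               // this worker's own tasks
  void trim_queue() { queue.clear(); }  // "process" everything we own
};

struct QueueSet {
  std::vector<Worker*> workers;
  bool steal(std::size_t self, int& task) {
    for (std::size_t i = 0; i < workers.size(); ++i) {
      if (i == self) continue;
      std::vector<int>& q = workers[i]->queue;
      if (!q.empty()) { task = q.back(); q.pop_back(); return true; }
    }
    return false;
  }
  bool all_empty() const {
    for (std::size_t i = 0; i < workers.size(); ++i)
      if (!workers[i]->queue.empty()) return false;
    return true;
  }
};

// Stand-in for ParallelTaskTerminator::offer_termination(): the real one
// spins/blocks until all workers agree; here we just check for leftover work.
bool offer_termination(const QueueSet& qs) { return qs.all_empty(); }

void evacuate_followers(QueueSet& qs, Worker& self, std::size_t self_id) {
  int stolen;
  self.trim_queue();                    // drain our own work first
  do {
    while (qs.steal(self_id, stolen)) {
      // Processing a stolen task may create new local entries, so we
      // re-trim our queue after every successful steal.
      self.queue.push_back(stolen);
      self.trim_queue();
    }
  } while (!offer_termination(qs));     // only quit when everyone is done
}

int main() {
  Worker a, b;
  b.queue.push_back(1); b.queue.push_back(2);
  QueueSet qs;
  qs.workers.push_back(&a); qs.workers.push_back(&b);
  evacuate_followers(qs, a, 0);
  std::printf("all empty: %s\n", qs.all_empty() ? "yes" : "no");
  return 0;
}
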
class G1ParTask : public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
@ -4182,8 +4230,7 @@ public:
pss.print_termination_stats(i);
}
assert(pss.refs_to_scan() == 0, "Task queue should be empty");
assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty");
assert(pss.refs()->is_empty(), "should be empty");
double end_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
}

View File

@ -1651,49 +1651,17 @@ public:
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
size_t undo_waste() const { return _undo_waste; }
template <class T> void push_on_queue(T* ref) {
assert(ref != NULL, "invariant");
assert(has_partial_array_mask(ref) ||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
#ifdef ASSERT
if (has_partial_array_mask(ref)) {
oop p = clear_partial_array_mask(ref);
// Verify that we point into the CS
assert(_g1h->obj_in_cs(p), "Should be in CS");
}
#endif
bool verify_ref(narrowOop* ref) const;
bool verify_ref(oop* ref) const;
bool verify_task(StarTask ref) const;
#endif // ASSERT
template <class T> void push_on_queue(T* ref) {
assert(verify_ref(ref), "sanity");
refs()->push(ref);
}
void pop_from_queue(StarTask& ref) {
if (refs()->pop_local(ref)) {
assert((oop*)ref != NULL, "pop_local() returned true");
assert(UseCompressedOops || !ref.is_narrow(), "Error");
assert(has_partial_array_mask((oop*)ref) ||
_g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
: oopDesc::load_decode_heap_oop((oop*)ref)),
"invariant");
} else {
StarTask null_task;
ref = null_task;
}
}
void pop_from_overflow_queue(StarTask& ref) {
StarTask new_ref;
refs()->pop_overflow(new_ref);
assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
assert(has_partial_array_mask((oop*)new_ref) ||
_g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
: oopDesc::load_decode_heap_oop((oop*)new_ref)),
"invariant");
ref = new_ref;
}
int refs_to_scan() { return (int)refs()->size(); }
int overflowed_refs_to_scan() { return (int)refs()->overflow_stack()->size(); }
template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
if (G1DeferredRSUpdate) {
deferred_rs_update(from, p, tid);
@ -1804,7 +1772,6 @@ public:
}
}
private:
template <class T> void deal_with_reference(T* ref_to_scan) {
if (has_partial_array_mask(ref_to_scan)) {
_partial_scan_cl->do_oop_nv(ref_to_scan);
@ -1818,59 +1785,15 @@ private:
}
}
public:
void trim_queue() {
// I've replicated the loop twice, first to drain the overflow
// queue, second to drain the task queue. This is better than
// having a single loop, which checks both conditions and, inside
// it, either pops the overflow queue or the task queue, as each
// loop is tighter. Also, the decision to drain the overflow queue
// first is not arbitrary, as the overflow queue is not visible
// to the other workers, whereas the task queue is. So, we want to
// drain the "invisible" entries first, while allowing the other
// workers to potentially steal the "visible" entries.
while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
while (overflowed_refs_to_scan() > 0) {
StarTask ref_to_scan;
assert((oop*)ref_to_scan == NULL, "Constructed above");
pop_from_overflow_queue(ref_to_scan);
// We shouldn't have pushed it on the queue if it was not
// pointing into the CSet.
assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
if (ref_to_scan.is_narrow()) {
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*)ref_to_scan;
assert(!has_partial_array_mask(p) &&
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
} else {
oop* p = (oop*)ref_to_scan;
assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
}
}
while (refs_to_scan() > 0) {
StarTask ref_to_scan;
assert((oop*)ref_to_scan == NULL, "Constructed above");
pop_from_queue(ref_to_scan);
if ((oop*)ref_to_scan != NULL) {
if (ref_to_scan.is_narrow()) {
assert(UseCompressedOops, "Error");
narrowOop* p = (narrowOop*)ref_to_scan;
assert(!has_partial_array_mask(p) &&
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
} else {
oop* p = (oop*)ref_to_scan;
assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
_g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
deal_with_reference(p);
}
}
}
void deal_with_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
} else {
deal_with_reference((oop*)ref);
}
}
public:
void trim_queue();
};

View File

@ -25,8 +25,6 @@
class HeapRegion;
class G1CollectedHeap;
class G1RemSet;
class HRInto_G1RemSet;
class G1RemSet;
class ConcurrentMark;
class DirtyCardToOopClosure;
class CMBitMap;

View File

@ -97,13 +97,6 @@ public:
}
};
void
StupidG1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
int worker_i) {
IntoCSRegionClosure rc(_g1, oc);
_g1->heap_region_iterate(&rc);
}
class VerifyRSCleanCardOopClosure: public OopClosure {
G1CollectedHeap* _g1;
public:
@ -119,8 +112,9 @@ public:
}
};
HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
: G1RemSet(g1), _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
: _g1(g1), _conc_refine_cards(0),
_ct_bs(ct_bs), _g1p(_g1->g1_policy()),
_cg1r(g1->concurrent_g1_refine()),
_traversal_in_progress(false),
_cset_rs_update_cl(NULL),
@ -134,7 +128,7 @@ HRInto_G1RemSet::HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
}
}
HRInto_G1RemSet::~HRInto_G1RemSet() {
G1RemSet::~G1RemSet() {
delete _seq_task;
for (uint i = 0; i < n_workers(); i++) {
assert(_cset_rs_update_cl[i] == NULL, "it should be");
@ -277,7 +271,7 @@ public:
// p threads
// Then thread t will start at region t * floor (n/p)
HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
HeapRegion* result = _g1p->collection_set();
if (ParallelGCThreads > 0) {
size_t cs_size = _g1p->collection_set_size();
@ -290,7 +284,7 @@ HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
return result;
}
void HRInto_G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = calculateStartRegion(worker_i);
@ -340,7 +334,7 @@ public:
}
};
void HRInto_G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
double start = os::elapsedTime();
// Apply the given closure to all remaining log entries.
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@ -439,12 +433,11 @@ public:
}
};
void HRInto_G1RemSet::cleanupHRRS() {
void G1RemSet::cleanupHRRS() {
HeapRegionRemSet::cleanup();
}
void
HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
int worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
@ -508,8 +501,7 @@ HRInto_G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
_cset_rs_update_cl[worker_i] = NULL;
}
void HRInto_G1RemSet::
prepare_for_oops_into_collection_set_do() {
void G1RemSet::prepare_for_oops_into_collection_set_do() {
#if G1_REM_SET_LOGGING
PrintRSClosure cl;
_g1->collection_set_iterate(&cl);
@ -581,7 +573,7 @@ public:
// RSet updating,
// * the post-write barrier shouldn't be logging updates to young
// regions (but there is a situation where this can happen - see
// the comment in HRInto_G1RemSet::concurrentRefineOneCard below -
// the comment in G1RemSet::concurrentRefineOneCard below -
// that should not be applicable here), and
// * during actual RSet updating, the filtering of cards in young
// regions in HeapRegion::oops_on_card_seq_iterate_careful is
@ -601,7 +593,7 @@ public:
}
};
void HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do() {
void G1RemSet::cleanup_after_oops_into_collection_set_do() {
guarantee( _cards_scanned != NULL, "invariant" );
_total_cards_scanned = 0;
for (uint i = 0; i < n_workers(); ++i)
@ -692,12 +684,12 @@ public:
}
};
void HRInto_G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
void G1RemSet::scrub(BitMap* region_bm, BitMap* card_bm) {
ScrubRSClosure scrub_cl(region_bm, card_bm);
_g1->heap_region_iterate(&scrub_cl);
}
void HRInto_G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val) {
ScrubRSClosure scrub_cl(region_bm, card_bm);
_g1->heap_region_par_iterate_chunked(&scrub_cl, worker_num, claim_val);
@ -741,7 +733,7 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
};
bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
bool check_for_refs_into_cset) {
// Construct the region representing the card.
HeapWord* start = _ct_bs->addr_for(card_ptr);
@ -820,7 +812,7 @@ bool HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i
return trigger_cl.value();
}
bool HRInto_G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
bool check_for_refs_into_cset) {
// If the card is no longer dirty, nothing to do.
if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
@ -995,7 +987,7 @@ public:
}
};
void HRInto_G1RemSet::print_summary_info() {
void G1RemSet::print_summary_info() {
G1CollectedHeap* g1 = G1CollectedHeap::heap();
#if CARD_REPEAT_HISTO
@ -1029,30 +1021,26 @@ void HRInto_G1RemSet::print_summary_info() {
g1->concurrent_g1_refine()->threads_do(&p);
gclog_or_tty->print_cr("");
if (G1UseHRIntoRS) {
HRRSStatsIter blk;
g1->heap_region_iterate(&blk);
gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
" Max = " SIZE_FORMAT "K.",
blk.total_mem_sz()/K, blk.max_mem_sz()/K);
gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
" free_lists = " SIZE_FORMAT "K.",
HeapRegionRemSet::static_mem_size()/K,
HeapRegionRemSet::fl_mem_size()/K);
gclog_or_tty->print_cr(" %d occupied cards represented.",
blk.occupied());
gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
(blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
(blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
gclog_or_tty->print_cr(" Did %d coarsenings.",
HeapRegionRemSet::n_coarsenings());
}
HRRSStatsIter blk;
g1->heap_region_iterate(&blk);
gclog_or_tty->print_cr(" Total heap region rem set sizes = " SIZE_FORMAT "K."
" Max = " SIZE_FORMAT "K.",
blk.total_mem_sz()/K, blk.max_mem_sz()/K);
gclog_or_tty->print_cr(" Static structures = " SIZE_FORMAT "K,"
" free_lists = " SIZE_FORMAT "K.",
HeapRegionRemSet::static_mem_size()/K,
HeapRegionRemSet::fl_mem_size()/K);
gclog_or_tty->print_cr(" %d occupied cards represented.",
blk.occupied());
gclog_or_tty->print_cr(" Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
(blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
(blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
gclog_or_tty->print_cr(" Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
}
void HRInto_G1RemSet::prepare_for_verify() {
void G1RemSet::prepare_for_verify() {
if (G1HRRSFlushLogBuffersOnVerify &&
(VerifyBeforeGC || VerifyAfterGC)
&& !_g1->full_collection()) {

View File

@ -27,107 +27,18 @@
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class HRInto_G1RemSet;
class ConcurrentG1Refine;
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
// so that they can be used to update the individual region remsets.
class G1RemSet: public CHeapObj {
protected:
G1CollectedHeap* _g1;
unsigned _conc_refine_cards;
size_t n_workers();
public:
G1RemSet(G1CollectedHeap* g1) :
_g1(g1), _conc_refine_cards(0)
{}
// Invoke "blk->do_oop" on all pointers into the CS in object in regions
// outside the CS (having invoked "blk->set_region" to set the "from"
// region correctly beforehand.) The "worker_i" param is for the
// parallel case where the number of the worker thread calling this
// function can be helpful in partitioning the work to be done. It
// should be the same as the "i" passed to the calling thread's
// work(i) function. In the sequential case this param will be ignored.
virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
int worker_i) = 0;
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
// code) any threads call oops_into_collection_set_do. (This offers an
// opportunity for sequential setup and teardown of structures needed by a
// parallel iteration over the CS's RS.)
virtual void prepare_for_oops_into_collection_set_do() = 0;
virtual void cleanup_after_oops_into_collection_set_do() = 0;
// If "this" is of the given subtype, return "this", else "NULL".
virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }
// Record, if necessary, the fact that *p (where "p" is in region "from",
// and is, a fortiori, required to be non-NULL) has changed to its new value.
virtual void write_ref(HeapRegion* from, oop* p) = 0;
virtual void write_ref(HeapRegion* from, narrowOop* p) = 0;
virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;
virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0;
// Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
// or card, respectively, such that a region or card with a corresponding
// 0 bit contains no part of any live object. Eliminates any remembered
// set entries that correspond to dead heap ranges.
virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
// Like the above, but assumes it is called in parallel: "worker_num" is the
// parallel thread id of the current thread, and "claim_val" is the
// value that should be used to claim heap regions.
virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val) = 0;
// Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
// join and leave around parts that must be atomic wrt GC. (NULL means
// being done at a safepoint.)
// With some implementations of this routine, when check_for_refs_into_cset
// is true, a true result may be returned if the given card contains oops
// that have references into the current collection set.
virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
bool check_for_refs_into_cset) {
return false;
}
// Print any relevant summary info.
virtual void print_summary_info() {}
// Prepare remembered set for verification.
virtual void prepare_for_verify() {};
};
// The simplest possible G1RemSet: iterates over all objects in non-CS
// regions, searching for pointers into the CS.
class StupidG1RemSet: public G1RemSet {
public:
StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
int worker_i);
void prepare_for_oops_into_collection_set_do() {}
void cleanup_after_oops_into_collection_set_do() {}
// Nothing is necessary in the version below.
void write_ref(HeapRegion* from, oop* p) {}
void write_ref(HeapRegion* from, narrowOop* p) {}
void par_write_ref(HeapRegion* from, oop* p, int tid) {}
void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}
void scrub(BitMap* region_bm, BitMap* card_bm) {}
void scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val) {}
};
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
// so that they can be used to update the individual region remsets.
class HRInto_G1RemSet: public G1RemSet {
protected:
enum SomePrivateConstants {
UpdateRStoMergeSync = 0,
@ -175,27 +86,31 @@ public:
// scanned.
void cleanupHRRS();
HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
~HRInto_G1RemSet();
G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
~G1RemSet();
// Invoke "blk->do_oop" on all pointers into the CS in objects in regions
// outside the CS (having invoked "blk->set_region" to set the "from"
// region correctly beforehand.) The "worker_i" param is for the
// parallel case where the number of the worker thread calling this
// function can be helpful in partitioning the work to be done. It
// should be the same as the "i" passed to the calling thread's
// work(i) function. In the sequential case this param will be ignored.
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
int worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
// code) any threads call oops_into_collection_set_do. (This offers an
// opportunity for sequential setup and teardown of structures needed by a
// parallel iteration over the CS's RS.)
void prepare_for_oops_into_collection_set_do();
void cleanup_after_oops_into_collection_set_do();
void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
if (UseCompressedOops) {
scanNewRefsRS_work<narrowOop>(oc, worker_i);
} else {
scanNewRefsRS_work<oop>(oc, worker_i);
}
}
void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
HeapRegion* calculateStartRegion(int i);
HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }
void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
HeapRegion* calculateStartRegion(int i);
CardTableModRefBS* ct_bs() { return _ct_bs; }
size_t cardsScanned() { return _total_cards_scanned; }
@ -219,17 +134,31 @@ public:
bool self_forwarded(oop obj);
// Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
// or card, respectively, such that a region or card with a corresponding
// 0 bit contains no part of any live object. Eliminates any remembered
// set entries that correspond to dead heap ranges.
void scrub(BitMap* region_bm, BitMap* card_bm);
// Like the above, but assumes it is called in parallel: "worker_num" is the
// parallel thread id of the current thread, and "claim_val" is the
// value that should be used to claim heap regions.
void scrub_par(BitMap* region_bm, BitMap* card_bm,
int worker_num, int claim_val);
// If check_for_refs_into_cset is true then a true result is returned
// if the card contains oops that have references into the current
// collection set.
// Refine the card corresponding to "card_ptr". If "sts" is non-NULL,
// join and leave around parts that must be atomic wrt GC. (NULL means
// being done at a safepoint.)
// If check_for_refs_into_cset is true, a true result is returned
// if the given card contains oops that have references into the
// current collection set.
virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
bool check_for_refs_into_cset);
// Print any relevant summary info.
virtual void print_summary_info();
// Prepare remembered set for verification.
virtual void prepare_for_verify();
};
@ -250,13 +179,13 @@ public:
class UpdateRSOopClosure: public OopClosure {
HeapRegion* _from;
HRInto_G1RemSet* _rs;
G1RemSet* _rs;
int _worker_i;
template <class T> void do_oop_work(T* p);
public:
UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
_from(NULL), _rs(rs), _worker_i(worker_i) {
guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
}
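
As the class comment above says, each heap region keeps a rem set recording the external heap references into it, fed by write barriers. A toy model of that shape with card-granular entries; all names and constants here are illustrative, not HotSpot's:

#include <cstddef>
#include <cstdio>
#include <set>
#include <vector>

// One rem set per region: which cards elsewhere in the heap may hold
// pointers into this region. Scanning a region's rem set then visits
// only those cards instead of the whole heap.
struct RegionRemSet {
  std::set<std::size_t> cards;
};

struct ToyHeap {
  static const std::size_t kCardBytes   = 512;      // assumed card size
  static const std::size_t kRegionBytes = 1 << 20;  // assumed region size
  std::vector<RegionRemSet> remsets;                // one per region

  explicit ToyHeap(std::size_t regions) : remsets(regions) {}

  // Write-barrier hook: address 'from_addr' now stores a pointer into
  // region 'to_region', so remember the card containing 'from_addr'.
  void par_write_ref(std::size_t from_addr, std::size_t to_region) {
    remsets[to_region].cards.insert(from_addr / kCardBytes);
  }
};

int main() {
  ToyHeap h(4);
  h.par_write_ref(3 * ToyHeap::kRegionBytes + 100, 1);  // region 3 -> 1
  std::printf("cards into region 1: %zu\n", h.remsets[1].cards.size());
  return 0;
}
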

View File

@ -30,16 +30,18 @@ inline size_t G1RemSet::n_workers() {
}
}
template <class T> inline void HRInto_G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
template <class T>
inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
par_write_ref_nv(from, p, 0);
}
inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
inline bool G1RemSet::self_forwarded(oop obj) {
bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
return result;
}
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
template <class T>
inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
// can't do because of races
@ -77,7 +79,7 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
// Deferred updates to the CSet are either discarded (in the normal case),
// or processed (if an evacuation failure occurs) at the end
// of the collection.
// See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
// See G1RemSet::cleanup_after_oops_into_collection_set_do().
} else {
#if G1_REM_SET_LOGGING
gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
@ -91,12 +93,14 @@ template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* fro
}
}
template <class T> inline void UpdateRSOopClosure::do_oop_work(T* p) {
template <class T>
inline void UpdateRSOopClosure::do_oop_work(T* p) {
assert(_from != NULL, "from region must be non-NULL");
_rs->par_write_ref(_from, p, _worker_i);
}
template <class T> inline void UpdateRSetImmediate::do_oop_work(T* p) {
template <class T>
inline void UpdateRSetImmediate::do_oop_work(T* p) {
assert(_from->is_in_reserved(p), "paranoia");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop) && !_from->is_survivor()) {

View File

@ -40,9 +40,6 @@
develop(intx, G1PolicyVerbose, 0, \
"The verbosity level on G1 policy decisions") \
\
develop(bool, G1UseHRIntoRS, true, \
"Determines whether the 'advanced' HR Into rem set is used.") \
\
develop(intx, G1MarkingVerboseLevel, 0, \
"Level (0-4) of verboseness of the marking code") \
\

View File

@ -377,10 +377,26 @@ void HeapRegion::calc_gc_efficiency() {
}
// </PREDICTION>
void HeapRegion::set_startsHumongous() {
void HeapRegion::set_startsHumongous(HeapWord* new_end) {
assert(end() == _orig_end,
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
_humongous_type = StartsHumongous;
_humongous_start_region = this;
assert(end() == _orig_end, "Should be normal before alloc.");
set_end(new_end);
_offsets.set_for_starts_humongous(new_end);
}
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
assert(end() == _orig_end,
"Should be normal before the humongous object allocation");
assert(top() == bottom(), "should be empty");
assert(start->startsHumongous(), "pre-condition");
_humongous_type = ContinuesHumongous;
_humongous_start_region = start;
}
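
The two setters above establish the region roles for a humongous sequence: one StartsHumongous region (its end stretched over the whole object) and trailing ContinuesHumongous regions pointing back at it. A compact model of just that state transition, under the same precondition:

#include <cassert>
#include <cstddef>

// Toy model of the roles set up above: the first region of a humongous
// sequence is StartsHumongous and points at itself; every later region
// is ContinuesHumongous and points back at the first.
enum HumongousType { NotHumongous, StartsHumongous, ContinuesHumongous };

struct Region {
  HumongousType type;
  Region* start;  // analogue of _humongous_start_region
  Region() : type(NotHumongous), start(NULL) {}

  void set_startsHumongous() {
    type = StartsHumongous;
    start = this;
  }
  void set_continuesHumongous(Region* s) {
    assert(s->type == StartsHumongous && "pre-condition");
    type = ContinuesHumongous;
    start = s;
  }
};

int main() {
  Region first, second, third;
  first.set_startsHumongous();
  second.set_continuesHumongous(&first);
  third.set_continuesHumongous(&first);
  assert(second.start == &first && third.start == &first);
  return 0;
}
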
bool HeapRegion::claimHeapRegion(jint claimValue) {
@ -500,23 +516,6 @@ CompactibleSpace* HeapRegion::next_compaction_space() const {
return blk.result();
}
void HeapRegion::set_continuesHumongous(HeapRegion* start) {
// The order is important here.
start->add_continuingHumongousRegion(this);
_humongous_type = ContinuesHumongous;
_humongous_start_region = start;
}
void HeapRegion::add_continuingHumongousRegion(HeapRegion* cont) {
// Must join the blocks of the current H region seq with the block of the
// added region.
offsets()->join_blocks(bottom(), cont->bottom());
arrayOop obj = (arrayOop)(bottom());
obj->set_length((int) (obj->length() + cont->capacity()/jintSize));
set_end(cont->end());
set_top(cont->end());
}
void HeapRegion::save_marks() {
set_saved_mark();
}

View File

@ -395,14 +395,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Causes the current region to represent a humongous object spanning "n"
// regions.
virtual void set_startsHumongous();
void set_startsHumongous(HeapWord* new_end);
// The regions that continue a humongous sequence should be added using
// this method, in increasing address order.
void set_continuesHumongous(HeapRegion* start);
void add_continuingHumongousRegion(HeapRegion* cont);
// If the region has a remembered set, return a pointer to it.
HeapRegionRemSet* rem_set() const {
return _rem_set;
@ -733,13 +731,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
FilterOutOfRegionClosure* cl,
bool filter_young);
// The region "mr" is entirely in "this", and starts and ends at block
// boundaries. The caller declares that all the contained blocks are
// coalesced into one.
void declare_filled_region_to_BOT(MemRegion mr) {
_offsets.single_block(mr.start(), mr.end());
}
// A version of block start that is guaranteed to find *some* block
// boundary at or before "p", but does no object iteration, and may
// therefore be used safely when the heap is unparseable.

View File

@ -1159,9 +1159,7 @@ HeapRegionRemSetIterator() :
_hrrs(NULL),
_g1h(G1CollectedHeap::heap()),
_bosa(NULL),
_sparse_iter(size_t(G1CollectedHeap::heap()->reserved_region().start())
>> CardTableModRefBS::card_shift)
{}
_sparse_iter() { }
void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
_hrrs = hrrs;

View File

@ -91,34 +91,118 @@ HeapRegionSeq::alloc_obj_from_region_index(int ind, size_t word_size) {
}
if (sumSizes >= word_size) {
_alloc_search_start = cur;
// Mark the allocated regions as allocated.
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// Currently, allocs_are_zero_filled() returns false. The zero
// filling infrastructure will be going away soon (see CR 6977804).
// So no need to do anything else here.
bool zf = G1CollectedHeap::heap()->allocs_are_zero_filled();
assert(!zf, "not supported");
// This will be the "starts humongous" region.
HeapRegion* first_hr = _regions.at(first);
for (int i = first; i < cur; i++) {
HeapRegion* hr = _regions.at(i);
if (zf)
hr->ensure_zero_filled();
{
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
first_hr->set_zero_fill_allocated();
}
// The header of the new object will be placed at the bottom of
// the first region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new end of the first region in the series that
// should also match the end of the last region in the series.
// (Note: sumSizes = "region size" x "number of regions we found").
HeapWord* new_end = new_obj + sumSizes;
// This will be the new top of the first region that will reflect
// this allocation.
HeapWord* new_top = new_obj + word_size;
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_startsHumongous(new_end);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (int i = first + 1; i < cur; ++i) {
hr = _regions.at(i);
{
MutexLockerEx x(ZF_mon, Mutex::_no_safepoint_check_flag);
hr->set_zero_fill_allocated();
}
size_t sz = hr->capacity() / HeapWordSize;
HeapWord* tmp = hr->allocate(sz);
assert(tmp != NULL, "Humongous allocation failure");
MemRegion mr = MemRegion(tmp, sz);
CollectedHeap::fill_with_object(mr);
hr->declare_filled_region_to_BOT(mr);
if (i == first) {
first_hr->set_startsHumongous();
hr->set_continuesHumongous(first_hr);
}
// If we have "continues humongous" regions (hr != NULL), then the
// end of the last one should match new_end.
assert(hr == NULL || hr->end() == new_end, "sanity");
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
OrderAccess::storestore();
// Now that the BOT and the object header have been initialized,
// we can update top of the "starts humongous" region.
assert(first_hr->bottom() < new_top && new_top <= first_hr->end(),
"new_top should be in this region");
first_hr->set_top(new_top);
// Now, we will update the top fields of the "continues humongous"
// regions. The reason we need to do this is that, otherwise,
// these regions would look empty and this will confuse parts of
// G1. For example, the code that looks for a consecutive number
// of empty regions will consider them empty and try to
// re-allocate them. We can extend is_empty() to also include
// !continuesHumongous(), but it is easier to just update the top
// fields here.
hr = NULL;
for (int i = first + 1; i < cur; ++i) {
hr = _regions.at(i);
if ((i + 1) == cur) {
// last continues humongous region
assert(hr->bottom() < new_top && new_top <= hr->end(),
"new_top should fall on this region");
hr->set_top(new_top);
} else {
assert(i > first, "sanity");
hr->set_continuesHumongous(first_hr);
// not last one
assert(new_top > hr->end(), "new_top should be above this region");
hr->set_top(hr->end());
}
}
HeapWord* first_hr_bot = first_hr->bottom();
HeapWord* obj_end = first_hr_bot + word_size;
first_hr->set_top(obj_end);
return first_hr_bot;
// If we have continues humongous regions (hr != NULL), then the
// end of the last one should match new_end and its top should
// match new_top.
assert(hr == NULL ||
(hr->end() == new_end && hr->top() == new_top), "sanity");
return new_obj;
} else {
// If we started from the beginning, we want to know why we can't alloc.
return NULL;
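
The comments in this hunk describe a publication protocol: zero the object header and initialize the BOT while every top still points at bottom, issue a storestore barrier, and only then raise the top fields that concurrent refinement threads can observe. A standalone sketch of that ordering, with a C++11 release fence standing in for OrderAccess::storestore() and an invented data layout:

#include <atomic>
#include <cstddef>
#include <cstring>

struct Region {
  unsigned char data[1024];        // invented backing store
  std::atomic<std::size_t> top;    // how much of 'data' is published
  Region() : top(0) {}
};

// Publish a pseudo-object of 'size' bytes. A concurrent reader that
// acquire-loads 'top' and sees it raised is guaranteed to also see the
// zeroed header, because the release fence orders the stores; that is
// the role OrderAccess::storestore() plays above.
void publish(Region& r, std::size_t size) {
  std::memset(r.data, 0, 16);                           // 1. zero the "header"
  /* 2. BOT-style metadata would be initialized here */
  std::atomic_thread_fence(std::memory_order_release);  // 3. storestore
  r.top.store(size, std::memory_order_relaxed);         // 4. make it visible
}

int main() {
  Region r;
  publish(r, 128);
  return r.top.load() == 128 ? 0 : 1;
}
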

View File

@ -308,7 +308,7 @@ void RSHashTable::add_entry(SparsePRTEntry* e) {
assert(e2->num_valid_cards() > 0, "Postcondition.");
}
CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
CardIdx_t RSHashTableIter::find_first_card_in_list() {
CardIdx_t res;
while (_bl_ind != RSHashTable::NullEntry) {
res = _rsht->entry(_bl_ind)->card(0);
@ -322,14 +322,11 @@ CardIdx_t /* RSHashTable:: */ RSHashTableIter::find_first_card_in_list() {
return SparsePRTEntry::NullEntry;
}
size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
return
_heap_bot_card_ind
+ (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
+ ci;
size_t RSHashTableIter::compute_card_ind(CardIdx_t ci) {
return (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion) + ci;
}
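
With the heap-bottom bias gone, compute_card_ind() is pure region-relative arithmetic: region index times CardsPerRegion plus the card offset. A worked example, assuming 1 MB regions and 512-byte cards (2048 cards per region; the real values depend on build-time constants):

#include <cassert>
#include <cstddef>

static const std::size_t kRegionBytes    = 1 << 20;  // assumed: 1 MB regions
static const std::size_t kCardBytes      = 512;      // assumed: 512 B cards
static const std::size_t kCardsPerRegion = kRegionBytes / kCardBytes;  // 2048

// Region-relative card indexing, as in the simplified compute_card_ind().
std::size_t compute_card_ind(std::size_t r_ind, std::size_t ci) {
  return r_ind * kCardsPerRegion + ci;
}

int main() {
  // Card 5 of region 3: 3 * 2048 + 5 = 6149.
  assert(compute_card_ind(3, 5) == 6149);
  return 0;
}
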
bool /* RSHashTable:: */ RSHashTableIter::has_next(size_t& card_index) {
bool RSHashTableIter::has_next(size_t& card_index) {
_card_ind++;
CardIdx_t ci;
if (_card_ind < SparsePRTEntry::cards_num() &&

View File

@ -169,7 +169,6 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
int _bl_ind; // [-1, 0.._rsht->_capacity)
short _card_ind; // [0..SparsePRTEntry::cards_num())
RSHashTable* _rsht;
size_t _heap_bot_card_ind;
// If the bucket list pointed to by _bl_ind contains a card, sets
// _bl_ind to the index of that entry, and returns the card.
@ -183,13 +182,11 @@ class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
size_t compute_card_ind(CardIdx_t ci);
public:
RSHashTableIter(size_t heap_bot_card_ind) :
RSHashTableIter() :
_tbl_ind(RSHashTable::NullEntry),
_bl_ind(RSHashTable::NullEntry),
_card_ind((SparsePRTEntry::cards_num() - 1)),
_rsht(NULL),
_heap_bot_card_ind(heap_bot_card_ind)
{}
_rsht(NULL) {}
void init(RSHashTable* rsht) {
_rsht = rsht;
@ -280,20 +277,11 @@ public:
bool contains_card(RegionIdx_t region_id, CardIdx_t card_index) const {
return _next->contains_card(region_id, card_index);
}
#if 0
void verify_is_cleared();
void print();
#endif
};
class SparsePRTIter: public /* RSHashTable:: */RSHashTableIter {
class SparsePRTIter: public RSHashTableIter {
public:
SparsePRTIter(size_t heap_bot_card_ind) :
/* RSHashTable:: */RSHashTableIter(heap_bot_card_ind)
{}
void init(const SparsePRT* sprt) {
RSHashTableIter::init(sprt->cur());
}

View File

@ -310,10 +310,16 @@ heapRegionSeq.hpp heapRegion.hpp
heapRegionSeq.inline.hpp heapRegionSeq.hpp
instanceKlass.cpp g1RemSet.inline.hpp
instanceRefKlass.cpp g1RemSet.inline.hpp
klass.hpp g1OopClosures.hpp
memoryService.cpp g1MemoryPool.hpp
objArrayKlass.cpp g1RemSet.inline.hpp
ptrQueue.cpp allocation.hpp
ptrQueue.cpp allocation.inline.hpp
ptrQueue.cpp mutex.hpp

View File

@ -133,6 +133,7 @@ parallelScavengeHeap.cpp psMarkSweep.hpp
parallelScavengeHeap.cpp psParallelCompact.hpp
parallelScavengeHeap.cpp psPromotionManager.hpp
parallelScavengeHeap.cpp psScavenge.hpp
parallelScavengeHeap.cpp vmError.hpp
parallelScavengeHeap.cpp vmThread.hpp
parallelScavengeHeap.cpp vmPSOperations.hpp

View File

@ -846,7 +846,7 @@ void ParNewGeneration::collect(bool full,
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
gch->set_incremental_collection_will_fail();
gch->set_incremental_collection_failed(); // slight lie, in that we did not even attempt one
return;
}
assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@ -935,8 +935,6 @@ void ParNewGeneration::collect(bool full,
assert(to()->is_empty(), "to space should be empty now");
} else {
assert(HandlePromotionFailure,
"Should only be here if promotion failure handling is on");
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@ -947,7 +945,7 @@ void ParNewGeneration::collect(bool full,
// All the spaces are in play for mark-sweep.
swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();
gch->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
@ -1092,11 +1090,6 @@ oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
old, m, sz);
if (new_obj == NULL) {
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
// is incorrectly set. In any case, it's seriously wrong to be here!
vm_exit_out_of_memory(sz*wordSize, "promotion");
}
// promotion failed, forward to self
_promotion_failed = true;
new_obj = old;
@ -1206,12 +1199,6 @@ oop ParNewGeneration::copy_to_survivor_space_with_undo(
old, m, sz);
if (new_obj == NULL) {
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio
// flag is incorrectly set. In any case, it's seriously wrong to be
// here!
vm_exit_out_of_memory(sz*wordSize, "promotion");
}
// promotion failed, forward to self
forward_ptr = old->forward_to_atomic(old);
new_obj = old;

View File

@ -805,7 +805,8 @@ HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
if (young_gen()->is_in_reserved(addr)) {
assert(young_gen()->is_in(addr),
"addr should be in allocated part of young gen");
if (Debugging) return NULL; // called from find() in debug.cpp
// called from os::print_location by find or VMError
if (Debugging || VMError::fatal_error_in_progress()) return NULL;
Unimplemented();
} else if (old_gen()->is_in_reserved(addr)) {
assert(old_gen()->is_in(addr),

View File

@ -301,6 +301,7 @@ c1_MacroAssembler.hpp assembler.hpp
c1_MacroAssembler.hpp assembler_<arch>.inline.hpp
c1_MacroAssembler_<arch>.cpp arrayOop.hpp
c1_MacroAssembler_<arch>.cpp basicLock.hpp
c1_MacroAssembler_<arch>.cpp biasedLocking.hpp
c1_MacroAssembler_<arch>.cpp c1_MacroAssembler.hpp
c1_MacroAssembler_<arch>.cpp c1_Runtime1.hpp
@ -309,7 +310,6 @@ c1_MacroAssembler_<arch>.cpp interpreter.hpp
c1_MacroAssembler_<arch>.cpp markOop.hpp
c1_MacroAssembler_<arch>.cpp os.hpp
c1_MacroAssembler_<arch>.cpp stubRoutines.hpp
c1_MacroAssembler_<arch>.cpp synchronizer.hpp
c1_MacroAssembler_<arch>.cpp systemDictionary.hpp
c1_MacroAssembler_<arch>.hpp generate_platform_dependent_include

View File

@ -300,10 +300,17 @@ barrierSet.hpp oopsHierarchy.hpp
barrierSet.inline.hpp barrierSet.hpp
barrierSet.inline.hpp cardTableModRefBS.hpp
basicLock.cpp basicLock.hpp
basicLock.cpp synchronizer.hpp
basicLock.hpp handles.hpp
basicLock.hpp markOop.hpp
basicLock.hpp top.hpp
biasedLocking.cpp basicLock.hpp
biasedLocking.cpp biasedLocking.hpp
biasedLocking.cpp klass.inline.hpp
biasedLocking.cpp markOop.hpp
biasedLocking.cpp synchronizer.hpp
biasedLocking.cpp task.hpp
biasedLocking.cpp vframe.hpp
biasedLocking.cpp vmThread.hpp
@ -404,13 +411,13 @@ bytecodeInterpreter_<arch>.cpp vframeArray.hpp
bytecodeInterpreterWithChecks.cpp bytecodeInterpreter.cpp
bytecodeInterpreter.hpp allocation.hpp
bytecodeInterpreter.hpp basicLock.hpp
bytecodeInterpreter.hpp bytes_<arch>.hpp
bytecodeInterpreter.hpp frame.hpp
bytecodeInterpreter.hpp globalDefinitions.hpp
bytecodeInterpreter.hpp globals.hpp
bytecodeInterpreter.hpp methodDataOop.hpp
bytecodeInterpreter.hpp methodOop.hpp
bytecodeInterpreter.hpp synchronizer.hpp
bytecodeInterpreter.inline.hpp bytecodeInterpreter.hpp
bytecodeInterpreter.inline.hpp stubRoutines.hpp
@ -1265,6 +1272,7 @@ constantPoolOop.cpp javaClasses.hpp
constantPoolOop.cpp linkResolver.hpp
constantPoolOop.cpp objArrayKlass.hpp
constantPoolOop.cpp oop.inline.hpp
constantPoolOop.cpp oopFactory.hpp
constantPoolOop.cpp signature.hpp
constantPoolOop.cpp symbolTable.hpp
constantPoolOop.cpp systemDictionary.hpp
@ -1667,10 +1675,10 @@ frame.cpp stubRoutines.hpp
frame.cpp universe.inline.hpp
frame.hpp assembler.hpp
frame.hpp basicLock.hpp
frame.hpp methodOop.hpp
frame.hpp monitorChunk.hpp
frame.hpp registerMap.hpp
frame.hpp synchronizer.hpp
frame.hpp top.hpp
frame.inline.hpp bytecodeInterpreter.hpp
@ -2120,6 +2128,7 @@ interfaceSupport.hpp vmThread.hpp
interfaceSupport_<os_family>.hpp generate_platform_dependent_include
interp_masm_<arch_model>.cpp arrayOop.hpp
interp_masm_<arch_model>.cpp basicLock.hpp
interp_masm_<arch_model>.cpp biasedLocking.hpp
interp_masm_<arch_model>.cpp interp_masm_<arch_model>.hpp
interp_masm_<arch_model>.cpp interpreterRuntime.hpp
@ -2131,7 +2140,6 @@ interp_masm_<arch_model>.cpp markOop.hpp
interp_masm_<arch_model>.cpp methodDataOop.hpp
interp_masm_<arch_model>.cpp methodOop.hpp
interp_masm_<arch_model>.cpp sharedRuntime.hpp
interp_masm_<arch_model>.cpp synchronizer.hpp
interp_masm_<arch_model>.cpp thread_<os_family>.inline.hpp
interp_masm_<arch_model>.hpp assembler_<arch>.inline.hpp
@ -3094,25 +3102,26 @@ objArrayOop.cpp oop.inline.hpp
objArrayOop.hpp arrayOop.hpp
objectMonitor.cpp dtrace.hpp
objectMonitor.cpp handles.inline.hpp
objectMonitor.cpp interfaceSupport.hpp
objectMonitor.cpp markOop.hpp
objectMonitor.cpp mutexLocker.hpp
objectMonitor.cpp objectMonitor.hpp
objectMonitor.cpp objectMonitor.inline.hpp
objectMonitor.cpp oop.inline.hpp
objectMonitor.cpp osThread.hpp
objectMonitor.cpp os_<os_family>.inline.hpp
objectMonitor.cpp preserveException.hpp
objectMonitor.cpp resourceArea.hpp
objectMonitor.cpp stubRoutines.hpp
objectMonitor.cpp thread.hpp
objectMonitor.cpp thread_<os_family>.inline.hpp
objectMonitor.cpp threadService.hpp
objectMonitor.cpp vmSymbols.hpp
objectMonitor.hpp os.hpp
objectMonitor_<os_family>.cpp dtrace.hpp
objectMonitor_<os_family>.cpp interfaceSupport.hpp
objectMonitor_<os_family>.cpp objectMonitor.hpp
objectMonitor_<os_family>.cpp objectMonitor.inline.hpp
objectMonitor_<os_family>.cpp oop.inline.hpp
objectMonitor_<os_family>.cpp osThread.hpp
objectMonitor_<os_family>.cpp os_<os_family>.inline.hpp
objectMonitor_<os_family>.cpp threadService.hpp
objectMonitor_<os_family>.cpp thread_<os_family>.inline.hpp
objectMonitor_<os_family>.cpp vmSymbols.hpp
objectMonitor_<os_family>.hpp generate_platform_dependent_include
objectMonitor_<os_family>.hpp os_<os_family>.inline.hpp
objectMonitor_<os_family>.hpp thread_<os_family>.inline.hpp
objectMonitor_<os_family>.hpp top.hpp
objectMonitor_<os_family>.inline.hpp generate_platform_dependent_include
objectMonitor.hpp perfData.hpp
oop.cpp copy.hpp
oop.cpp handles.inline.hpp
@ -3231,6 +3240,7 @@ orderAccess.hpp allocation.hpp
orderAccess.hpp os.hpp
orderAccess_<os_arch>.inline.hpp orderAccess.hpp
orderAccess_<os_arch>.inline.hpp vm_version_<arch>.hpp
os.cpp allocation.inline.hpp
os.cpp arguments.hpp
@ -3328,7 +3338,6 @@ os_<os_family>.cpp mutex_<os_family>.inline.hpp
os_<os_family>.cpp nativeInst_<arch>.hpp
os_<os_family>.cpp no_precompiled_headers
os_<os_family>.cpp objectMonitor.hpp
os_<os_family>.cpp objectMonitor.inline.hpp
os_<os_family>.cpp oop.inline.hpp
os_<os_family>.cpp osThread.hpp
os_<os_family>.cpp os_share_<os_family>.hpp
@ -3388,6 +3397,12 @@ ostream.cpp xmlstream.hpp
ostream.hpp allocation.hpp
ostream.hpp timer.hpp
// include thread.hpp to prevent cyclic includes
park.cpp thread.hpp
park.hpp debug.hpp
park.hpp globalDefinitions.hpp
pcDesc.cpp debugInfoRec.hpp
pcDesc.cpp nmethod.hpp
pcDesc.cpp pcDesc.hpp
@ -3600,7 +3615,9 @@ relocInfo_<arch>.hpp generate_platform_dependent_include
relocator.cpp bytecodes.hpp
relocator.cpp handles.inline.hpp
relocator.cpp oop.inline.hpp
relocator.cpp oopFactory.hpp
relocator.cpp relocator.hpp
relocator.cpp stackMapTableFormat.hpp
relocator.cpp universe.inline.hpp
relocator.hpp bytecodes.hpp
@ -3907,6 +3924,8 @@ stackMapTable.hpp globalDefinitions.hpp
stackMapTable.hpp methodOop.hpp
stackMapTable.hpp stackMapFrame.hpp
stackMapTableFormat.hpp verificationType.hpp
stackValue.cpp debugInfo.hpp
stackValue.cpp frame.inline.hpp
stackValue.cpp handles.inline.hpp
@ -4062,10 +4081,10 @@ synchronizer.cpp preserveException.hpp
synchronizer.cpp resourceArea.hpp
synchronizer.cpp stubRoutines.hpp
synchronizer.cpp synchronizer.hpp
synchronizer.cpp threadService.hpp
synchronizer.cpp thread_<os_family>.inline.hpp
synchronizer.cpp vmSymbols.hpp
synchronizer.hpp basicLock.hpp
synchronizer.hpp handles.hpp
synchronizer.hpp markOop.hpp
synchronizer.hpp perfData.hpp
@ -4237,7 +4256,6 @@ thread.cpp memprofiler.hpp
thread.cpp mutexLocker.hpp
thread.cpp objArrayOop.hpp
thread.cpp objectMonitor.hpp
thread.cpp objectMonitor.inline.hpp
thread.cpp oop.inline.hpp
thread.cpp oopFactory.hpp
thread.cpp osThread.hpp
@ -4275,6 +4293,7 @@ thread.hpp mutexLocker.hpp
thread.hpp oop.hpp
thread.hpp os.hpp
thread.hpp osThread.hpp
thread.hpp park.hpp
thread.hpp safepoint.hpp
thread.hpp stubRoutines.hpp
thread.hpp threadLocalAllocBuffer.hpp
@ -4586,6 +4605,7 @@ vframeArray.hpp frame.inline.hpp
vframeArray.hpp growableArray.hpp
vframeArray.hpp monitorChunk.hpp
vframe_hp.cpp basicLock.hpp
vframe_hp.cpp codeCache.hpp
vframe_hp.cpp debugInfoRec.hpp
vframe_hp.cpp handles.inline.hpp
@ -4599,7 +4619,6 @@ vframe_hp.cpp pcDesc.hpp
vframe_hp.cpp scopeDesc.hpp
vframe_hp.cpp signature.hpp
vframe_hp.cpp stubRoutines.hpp
vframe_hp.cpp synchronizer.hpp
vframe_hp.cpp vframeArray.hpp
vframe_hp.cpp vframe_hp.hpp
@ -4751,6 +4770,7 @@ workgroup.cpp os.hpp
workgroup.cpp workgroup.hpp
workgroup.hpp taskqueue.hpp
workgroup.hpp thread_<os_family>.inline.hpp
xmlstream.cpp allocation.hpp

View File

@ -154,6 +154,7 @@ jvmtiExtensions.hpp allocation.hpp
jvmtiExtensions.hpp jvmti.h
jvmtiExtensions.hpp jvmtiEnv.hpp
jvmtiImpl.cpp deoptimization.hpp
jvmtiImpl.cpp exceptions.hpp
jvmtiImpl.cpp handles.hpp
jvmtiImpl.cpp handles.inline.hpp
@ -184,6 +185,13 @@ jvmtiImpl.hpp stackValueCollection.hpp
jvmtiImpl.hpp systemDictionary.hpp
jvmtiImpl.hpp vm_operations.hpp
jvmtiRawMonitor.cpp interfaceSupport.hpp
jvmtiRawMonitor.cpp jvmtiRawMonitor.hpp
jvmtiRawMonitor.cpp thread.hpp
jvmtiRawMonitor.hpp growableArray.hpp
jvmtiRawMonitor.hpp objectMonitor.hpp
jvmtiTagMap.cpp biasedLocking.hpp
jvmtiTagMap.cpp javaCalls.hpp
jvmtiTagMap.cpp jniHandles.hpp

View File

@ -35,6 +35,7 @@ jvmtiClassFileReconstituter.hpp jvmtiEnv.hpp
// jvmtiCodeBlobEvents is jck optional, please put deps in includeDB_features
jvmtiEnter.cpp jvmtiEnter.hpp
jvmtiEnter.cpp jvmtiRawMonitor.hpp
jvmtiEnter.cpp jvmtiUtil.hpp
jvmtiEnter.hpp interfaceSupport.hpp
@ -44,6 +45,7 @@ jvmtiEnter.hpp resourceArea.hpp
jvmtiEnter.hpp systemDictionary.hpp
jvmtiEnterTrace.cpp jvmtiEnter.hpp
jvmtiEnterTrace.cpp jvmtiRawMonitor.hpp
jvmtiEnterTrace.cpp jvmtiUtil.hpp
jvmtiEnv.cpp arguments.hpp
@ -66,11 +68,11 @@ jvmtiEnv.cpp jvmtiExtensions.hpp
jvmtiEnv.cpp jvmtiGetLoadedClasses.hpp
jvmtiEnv.cpp jvmtiImpl.hpp
jvmtiEnv.cpp jvmtiManageCapabilities.hpp
jvmtiEnv.cpp jvmtiRawMonitor.hpp
jvmtiEnv.cpp jvmtiRedefineClasses.hpp
jvmtiEnv.cpp jvmtiTagMap.hpp
jvmtiEnv.cpp jvmtiThreadState.inline.hpp
jvmtiEnv.cpp jvmtiUtil.hpp
jvmtiEnv.cpp objectMonitor.inline.hpp
jvmtiEnv.cpp osThread.hpp
jvmtiEnv.cpp preserveException.hpp
jvmtiEnv.cpp reflectionUtils.hpp
@ -87,6 +89,7 @@ jvmtiEnv.cpp vmThread.hpp
jvmtiEnv.hpp jvmtiEnvBase.hpp
jvmtiEnvBase.cpp biasedLocking.hpp
jvmtiEnvBase.cpp deoptimization.hpp
jvmtiEnvBase.cpp interfaceSupport.hpp
jvmtiEnvBase.cpp jfieldIDWorkaround.hpp
jvmtiEnvBase.cpp jvmtiEnv.hpp
@ -178,11 +181,13 @@ jvmtiExport.cpp jvmtiEventController.inline.hpp
jvmtiExport.cpp jvmtiExport.hpp
jvmtiExport.cpp jvmtiImpl.hpp
jvmtiExport.cpp jvmtiManageCapabilities.hpp
jvmtiExport.cpp jvmtiRawMonitor.hpp
jvmtiExport.cpp jvmtiTagMap.hpp
jvmtiExport.cpp jvmtiThreadState.inline.hpp
jvmtiExport.cpp nmethod.hpp
jvmtiExport.cpp objArrayKlass.hpp
jvmtiExport.cpp objArrayOop.hpp
jvmtiExport.cpp objectMonitor.hpp
jvmtiExport.cpp objectMonitor.inline.hpp
jvmtiExport.cpp pcDesc.hpp
jvmtiExport.cpp resourceArea.hpp
@ -210,6 +215,8 @@ jvmtiManageCapabilities.cpp jvmtiManageCapabilities.hpp
jvmtiManageCapabilities.hpp allocation.hpp
jvmtiManageCapabilities.hpp jvmti.h
// jvmtiRawMonitor is jck optional, please put deps in includeDB_features
jvmtiRedefineClasses.cpp bitMap.inline.hpp
jvmtiRedefineClasses.cpp codeCache.hpp
jvmtiRedefineClasses.cpp deoptimization.hpp

View File

@ -716,12 +716,13 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
assert(constantPoolCacheOopDesc::is_secondary_index(site_index), "proper format");
// there is a second CPC entries that is of interest; it caches signature info:
int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
int pool_index = pool->cache()->entry_at(main_index)->constant_pool_index();
// first resolve the signature to a MH.invoke methodOop
if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
JvmtiHideSingleStepping jhss(thread);
CallInfo info;
LinkResolver::resolve_invoke(info, Handle(), pool,
CallInfo callinfo;
LinkResolver::resolve_invoke(callinfo, Handle(), pool,
site_index, bytecode, CHECK);
// The main entry corresponds to a JVM_CONSTANT_InvokeDynamic, and serves
// as a common reference point for all invokedynamic call sites with
@ -729,8 +730,8 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
// as if it were an invokevirtual of MethodHandle.invoke.
pool->cache()->entry_at(main_index)->set_method(
bytecode,
info.resolved_method(),
info.vtable_index());
callinfo.resolved_method(),
callinfo.vtable_index());
}
// The method (f2 entry) of the main entry is the MH.invoke for the
@ -740,9 +741,10 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
assert(signature_invoker.not_null() && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic");
Handle info; // optional argument(s) in JVM_CONSTANT_InvokeDynamic
Handle bootm = SystemDictionary::find_bootstrap_method(caller_method, caller_bci,
main_index, CHECK);
if (bootm.is_null()) {
main_index, info, CHECK);
if (!java_dyn_MethodHandle::is_instance(bootm())) {
THROW_MSG(vmSymbols::java_lang_IllegalStateException(),
"no bootstrap method found for invokedynamic");
}
@ -753,8 +755,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
Handle info; // NYI: Other metadata from a new kind of CP entry. (Annotations?)
Handle call_site
= SystemDictionary::make_dynamic_call_site(bootm,
// Callee information:

View File

@ -659,9 +659,6 @@ HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
}
return result; // could be null if we are out of space
} else if (!gch->incremental_collection_will_fail()) {
// The gc_prologues have not executed yet. The value
// for incremental_collection_will_fail() is the remnant
// of the last collection.
// Do an incremental collection.
gch->do_collection(false /* full */,
false /* clear_all_soft_refs */,
@ -739,9 +736,8 @@ bool GenCollectorPolicy::should_try_older_generation_allocation(
GenCollectedHeap* gch = GenCollectedHeap::heap();
size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
return (word_size > heap_word_size(gen0_capacity))
|| (GC_locker::is_active_and_needs_gc())
|| ( gch->last_incremental_collection_failed()
&& gch->incremental_collection_will_fail());
|| GC_locker::is_active_and_needs_gc()
|| gch->incremental_collection_failed();
}
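
The rewritten predicate above ORs three conditions: the request is too big for the young gen, the GC locker is holding off a needed collection, or the last incremental collection failed. A minimal sketch with the accessors flattened into parameters (names are paraphrases, not the real API):

#include <cstddef>
#include <cstdio>

// Allocate straight into the older generation when the request cannot
// fit in the young gen at all, when the GC locker is deferring a needed
// collection, or when the last incremental collection failed.
bool should_try_older_generation_allocation(std::size_t word_size,
                                            std::size_t gen0_capacity_words,
                                            bool gc_locker_needs_gc,
                                            bool incremental_failed) {
  return word_size > gen0_capacity_words
      || gc_locker_needs_gc
      || incremental_failed;
}

int main() {
  bool r = should_try_older_generation_allocation(1 << 20, 1 << 16,
                                                  false, false);
  std::printf("huge request goes old: %s\n", r ? "yes" : "no");
  return 0;
}
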

View File

@ -510,7 +510,7 @@ void DefNewGeneration::collect(bool full,
// from this generation, pass on collection; let the next generation
// do it.
if (!collection_attempt_is_safe()) {
gch->set_incremental_collection_will_fail();
gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
return;
}
assert(to()->is_empty(), "Else not collection_attempt_is_safe");
@ -596,9 +596,8 @@ void DefNewGeneration::collect(bool full,
if (PrintGC && !PrintGCDetails) {
gch->print_heap_change(gch_prev_used);
}
assert(!gch->incremental_collection_failed(), "Should be clear");
} else {
assert(HandlePromotionFailure,
"Should not be here unless promotion failure handling is on");
assert(_promo_failure_scan_stack.is_empty(), "post condition");
_promo_failure_scan_stack.clear(true); // Clear cached segments.
@ -613,7 +612,7 @@ void DefNewGeneration::collect(bool full,
// and from-space.
swap_spaces(); // For uniformity wrt ParNewGeneration.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();
gch->set_incremental_collection_failed();
// Inform the next generation that a promotion failure occurred.
_next_gen->promotion_failure_occurred();
@ -700,12 +699,6 @@ oop DefNewGeneration::copy_to_survivor_space(oop old) {
if (obj == NULL) {
obj = _next_gen->promote(old, s);
if (obj == NULL) {
if (!HandlePromotionFailure) {
// A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
// is incorrectly set. In any case, it's seriously wrong to be here!
vm_exit_out_of_memory(s*wordSize, "promotion");
}
handle_promotion_failure(old);
return old;
}
@ -812,47 +805,43 @@ bool DefNewGeneration::collection_attempt_is_safe() {
assert(_next_gen != NULL,
"This must be the youngest gen, and not the only gen");
}
// Decide if there's enough room for a full promotion
// When using extremely large edens, we effectively lose a
// large amount of old space. Use the "MaxLiveObjectEvacuationRatio"
// flag to reduce the minimum evacuation space requirements. If
// there is not enough space to evacuate eden during a scavenge,
// the VM will immediately exit with an out of memory error.
// This flag has not been tested
// with collectors other than simple mark & sweep.
//
// Note that with the addition of promotion failure handling, the
// VM will not immediately exit but will undo the young generation
// collection. The parameter is left here for compatibility.
const double evacuation_ratio = MaxLiveObjectEvacuationRatio / 100.0;
// worst_case_evacuation is based on "used()". For the case where this
// method is called after a collection, this is still appropriate because
// the case that needs to be detected is one in which a full collection
// has been done and has overflowed into the young generation. In that
// case a minor collection will fail (the overflow of the full collection
// means there is no space in the old generation for any promotion).
size_t worst_case_evacuation = (size_t)(used() * evacuation_ratio);
return _next_gen->promotion_attempt_is_safe(worst_case_evacuation,
HandlePromotionFailure);
return _next_gen->promotion_attempt_is_safe(used());
}
void DefNewGeneration::gc_epilogue(bool full) {
DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
assert(!GC_locker::is_active(), "We should not be executing here");
// Check if the heap is approaching full after a collection has
// been done. Generally the young generation is empty at
// a minimum at the end of a collection. If it is not, then
// the heap is approaching full.
GenCollectedHeap* gch = GenCollectedHeap::heap();
clear_should_allocate_from_space();
if (collection_attempt_is_safe()) {
gch->clear_incremental_collection_will_fail();
} else {
gch->set_incremental_collection_will_fail();
if (full) { // we seem to be running out of space
set_should_allocate_from_space();
if (full) {
DEBUG_ONLY(seen_incremental_collection_failed = false;)
if (!collection_attempt_is_safe()) {
gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
set_should_allocate_from_space(); // we seem to be running out of space
} else {
gch->clear_incremental_collection_failed(); // We just did a full collection
clear_should_allocate_from_space(); // if set
}
} else {
#ifdef ASSERT
// It is possible that incremental_collection_failed() == true
// here, because an attempted scavenge did not succeed. The policy
// is normally expected to cause a full collection which should
// clear that condition, so we should not be here twice in a row
// with incremental_collection_failed() == true without having done
// a full collection in between.
if (!seen_incremental_collection_failed &&
gch->incremental_collection_failed()) {
seen_incremental_collection_failed = true;
} else if (seen_incremental_collection_failed) {
assert(!gch->incremental_collection_failed(), "Twice in a row");
seen_incremental_collection_failed = false;
}
#endif // ASSERT
}
if (ZapUnusedHeapArea) {

Some files were not shown because too many files have changed in this diff.