John Coomes 2014-05-23 10:28:09 -07:00
commit e08330c0d5
114 changed files with 2770 additions and 1352 deletions

View File

@ -64,7 +64,11 @@ public class ciEnv extends VMObject {
}
public Compile compilerData() {
return new Compile(compilerDataField.getValue(this.getAddress()));
Address addr = compilerDataField.getValue(this.getAddress());
if (addr == null) {
return null;
}
return new Compile(addr);
}
public ciObjectFactory factory() {
@ -94,10 +98,7 @@ public class ciEnv extends VMObject {
Method method = task.method();
int entryBci = task.osrBci();
int compLevel = task.compLevel();
Klass holder = method.getMethodHolder();
out.print("compile " + holder.getName().asString() + " " +
OopUtilities.escapeString(method.getName().asString()) + " " +
method.getSignature().asString() + " " +
out.print("compile " + method.nameAsAscii() + " " +
entryBci + " " + compLevel);
Compile compiler = compilerData();
if (compiler != null) {

View File

@ -55,4 +55,9 @@ public class ciKlass extends ciType {
public ciKlass(Address addr) {
super(addr);
}
public void printValueOn(PrintStream tty) {
Klass k = (Klass)getMetadata();
k.printValueOn(tty);
}
}

View File

@ -90,17 +90,23 @@ public class ciMethod extends ciMetadata {
}
public void dumpReplayData(PrintStream out) {
Method method = (Method)getMetadata();
NMethod nm = method.getNativeMethod();
Klass holder = method.getMethodHolder();
out.println("ciMethod " +
holder.getName().asString() + " " +
OopUtilities.escapeString(method.getName().asString()) + " " +
method.getSignature().asString() + " " +
method.getInvocationCount() + " " +
method.getBackedgeCount() + " " +
interpreterInvocationCount() + " " +
interpreterThrowoutCount() + " " +
instructionsSize());
Method method = (Method)getMetadata();
NMethod nm = method.getNativeMethod();
out.println("ciMethod " +
nameAsAscii() + " " +
method.getInvocationCount() + " " +
method.getBackedgeCount() + " " +
interpreterInvocationCount() + " " +
interpreterThrowoutCount() + " " +
instructionsSize());
}
public void printValueOn(PrintStream tty) {
tty.print("ciMethod " + method().getName().asString() + method().getSignature().asString() + "@" + getAddress());
}
public String nameAsAscii() {
Method method = (Method)getMetadata();
return method.nameAsAscii();
}
}

View File

@ -31,7 +31,7 @@ import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.types.*;
public class ciMethodData extends ciMetadata {
public class ciMethodData extends ciMetadata implements MethodDataInterface<ciKlass,ciMethod> {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
@ -54,7 +54,9 @@ public class ciMethodData extends ciMetadata {
extraDataSizeField = new CIntField(type.getCIntegerField("_extra_data_size"), 0);
dataSizeField = new CIntField(type.getCIntegerField("_data_size"), 0);
stateField = new CIntField(type.getCIntegerField("_state"), 0);
sizeofMethodDataOopDesc = (int)db.lookupType("MethodData").getSize();;
Type typeMethodData = db.lookupType("MethodData");
sizeofMethodDataOopDesc = (int)typeMethodData.getSize();
parametersTypeDataDi = new CIntField(typeMethodData.getCIntegerField("_parameters_type_data_di"), 0);
}
private static AddressField origField;
@ -69,11 +71,28 @@ public class ciMethodData extends ciMetadata {
private static CIntField dataSizeField;
private static CIntField stateField;
private static int sizeofMethodDataOopDesc;
private static CIntField parametersTypeDataDi;
public ciMethodData(Address addr) {
super(addr);
}
public ciKlass getKlassAtAddress(Address addr) {
return (ciKlass)ciObjectFactory.getMetadata(addr);
}
public ciMethod getMethodAtAddress(Address addr) {
return (ciMethod)ciObjectFactory.getMetadata(addr);
}
public void printKlassValueOn(ciKlass klass, PrintStream st) {
klass.printValueOn(st);
}
public void printMethodValueOn(ciMethod method, PrintStream st) {
method.printValueOn(st);
}
private byte[] fetchDataAt(Address base, long size) {
byte[] result = new byte[(int)size];
for (int i = 0; i < size; i++) {
@ -110,6 +129,10 @@ public class ciMethodData extends ciMetadata {
return (int)dataSizeField.getValue(getAddress());
}
int extraDataSize() {
return (int)extraDataSizeField.getValue(getAddress());
}
int state() {
return (int)stateField.getValue(getAddress());
}
@ -122,6 +145,16 @@ public class ciMethodData extends ciMetadata {
return dataIndex >= dataSize();
}
ParametersTypeData<ciKlass,ciMethod> parametersTypeData() {
Address base = getAddress().addOffsetTo(origField.getOffset());
int di = (int)parametersTypeDataDi.getValue(base);
if (di == -1) {
return null;
}
DataLayout dataLayout = new DataLayout(dataField.getValue(getAddress()), di);
return new ParametersTypeData<ciKlass,ciMethod>(this, dataLayout);
}
ProfileData dataAt(int dataIndex) {
if (outOfBounds(dataIndex)) {
return null;
@ -139,15 +172,21 @@ public class ciMethodData extends ciMetadata {
case DataLayout.jumpDataTag:
return new JumpData(dataLayout);
case DataLayout.receiverTypeDataTag:
return new ciReceiverTypeData(dataLayout);
return new ReceiverTypeData<ciKlass,ciMethod>(this, dataLayout);
case DataLayout.virtualCallDataTag:
return new ciVirtualCallData(dataLayout);
return new VirtualCallData<ciKlass,ciMethod>(this, dataLayout);
case DataLayout.retDataTag:
return new RetData(dataLayout);
case DataLayout.branchDataTag:
return new BranchData(dataLayout);
case DataLayout.multiBranchDataTag:
return new MultiBranchData(dataLayout);
case DataLayout.callTypeDataTag:
return new CallTypeData<ciKlass,ciMethod>(this, dataLayout);
case DataLayout.virtualCallTypeDataTag:
return new VirtualCallTypeData<ciKlass,ciMethod>(this, dataLayout);
case DataLayout.parametersTypeDataTag:
return new ParametersTypeData<ciKlass,ciMethod>(this, dataLayout);
}
}
@ -164,7 +203,23 @@ public class ciMethodData extends ciMetadata {
}
boolean isValid(ProfileData current) { return current != null; }
DataLayout limitDataPosition() {
return new DataLayout(dataField.getValue(getAddress()), dataSize());
}
DataLayout extraDataBase() {
return limitDataPosition();
}
DataLayout extraDataLimit() {
return new DataLayout(dataField.getValue(getAddress()), dataSize() + extraDataSize());
}
DataLayout nextExtra(DataLayout dataLayout) {
return new DataLayout(dataField.getValue(getAddress()), dataLayout.dp() + DataLayout.computeSizeInBytes(MethodData.extraNbCells(dataLayout)));
}
public void printDataOn(PrintStream st) {
if (parametersTypeData() != null) {
parametersTypeData().printDataOn(st);
}
ProfileData data = firstData();
for ( ; isValid(data); data = nextData(data)) {
st.print(dpToDi(data.dp()));
@ -172,16 +227,96 @@ public class ciMethodData extends ciMetadata {
// st->fillTo(6);
data.printDataOn(st);
}
st.println("--- Extra data:");
DataLayout dp = extraDataBase();
DataLayout end = extraDataLimit();
for (;; dp = nextExtra(dp)) {
switch(dp.tag()) {
case DataLayout.noTag:
continue;
case DataLayout.bitDataTag:
data = new BitData(dp);
break;
case DataLayout.speculativeTrapDataTag:
data = new SpeculativeTrapData<ciKlass,ciMethod>(this, dp);
break;
case DataLayout.argInfoDataTag:
data = new ArgInfoData(dp);
dp = end; // ArgInfoData is at the end of extra data section.
break;
default:
throw new InternalError("unexpected tag " + dp.tag());
}
st.print(dpToDi(data.dp()));
st.print(" ");
data.printDataOn(st);
if (dp == end) return;
}
}
int dumpReplayDataTypeHelper(PrintStream out, int round, int count, int index, ProfileData pdata, ciKlass k) {
if (k != null) {
if (round == 0) count++;
else out.print(" " + ((pdata.dp() + pdata.cellOffset(index)) / MethodData.cellSize) + " " + k.name());
}
return count;
}
int dumpReplayDataReceiverTypeHelper(PrintStream out, int round, int count, ReceiverTypeData<ciKlass,ciMethod> vdata) {
for (int i = 0; i < vdata.rowLimit(); i++) {
ciKlass k = vdata.receiver(i);
count = dumpReplayDataTypeHelper(out, round, count, vdata.receiverCellIndex(i), vdata, k);
}
return count;
}
int dumpReplayDataCallTypeHelper(PrintStream out, int round, int count, CallTypeDataInterface<ciKlass> callTypeData) {
if (callTypeData.hasArguments()) {
for (int i = 0; i < callTypeData.numberOfArguments(); i++) {
count = dumpReplayDataTypeHelper(out, round, count, callTypeData.argumentTypeIndex(i), (ProfileData)callTypeData, callTypeData.argumentType(i));
}
}
if (callTypeData.hasReturn()) {
count = dumpReplayDataTypeHelper(out, round, count, callTypeData.returnTypeIndex(), (ProfileData)callTypeData, callTypeData.returnType());
}
return count;
}
int dumpReplayDataExtraDataHelper(PrintStream out, int round, int count) {
DataLayout dp = extraDataBase();
DataLayout end = extraDataLimit();
for (;dp != end; dp = nextExtra(dp)) {
switch(dp.tag()) {
case DataLayout.noTag:
case DataLayout.argInfoDataTag:
return count;
case DataLayout.bitDataTag:
break;
case DataLayout.speculativeTrapDataTag: {
SpeculativeTrapData<ciKlass,ciMethod> data = new SpeculativeTrapData<ciKlass,ciMethod>(this, dp);
ciMethod m = data.method();
if (m != null) {
if (round == 0) {
count++;
} else {
out.print(" " + (dpToDi(data.dp() + data.cellOffset(SpeculativeTrapData.methodIndex())) / MethodData.cellSize) + " " + m.nameAsAscii());
}
}
break;
}
default:
throw new InternalError("bad tag " + dp.tag());
}
}
return count;
}
public void dumpReplayData(PrintStream out) {
MethodData mdo = (MethodData)getMetadata();
Method method = mdo.getMethod();
Klass holder = method.getMethodHolder();
out.print("ciMethodData " +
holder.getName().asString() + " " +
OopUtilities.escapeString(method.getName().asString()) + " " +
method.getSignature().asString() + " " +
method.nameAsAscii() + " " +
state() + " " + currentMileage());
byte[] orig = orig();
out.print(" orig " + orig.length);
@ -195,30 +330,28 @@ public class ciMethodData extends ciMetadata {
out.print(" 0x" + Long.toHexString(data[i]));
}
int count = 0;
ParametersTypeData<ciKlass,ciMethod> parameters = parametersTypeData();
for (int round = 0; round < 2; round++) {
if (round == 1) out.print(" oops " + count);
ProfileData pdata = firstData();
for ( ; isValid(pdata); pdata = nextData(pdata)) {
if (pdata instanceof ciReceiverTypeData) {
ciReceiverTypeData vdata = (ciReceiverTypeData)pdata;
for (int i = 0; i < vdata.rowLimit(); i++) {
ciKlass k = vdata.receiverAt(i);
if (k != null) {
if (round == 0) count++;
else out.print(" " + ((vdata.dp() + vdata.cellOffset(vdata.receiverCellIndex(i))) / MethodData.cellSize) + " " + k.name());
}
}
} else if (pdata instanceof ciVirtualCallData) {
ciVirtualCallData vdata = (ciVirtualCallData)pdata;
for (int i = 0; i < vdata.rowLimit(); i++) {
ciKlass k = vdata.receiverAt(i);
if (k != null) {
if (round == 0) count++;
else out.print(" " + ((vdata.dp() + vdata.cellOffset(vdata.receiverCellIndex(i))) / MethodData.cellSize + " " + k.name()));
}
}
if (pdata instanceof ReceiverTypeData) {
count = dumpReplayDataReceiverTypeHelper(out, round, count, (ReceiverTypeData<ciKlass,ciMethod>)pdata);
}
if (pdata instanceof CallTypeDataInterface) {
count = dumpReplayDataCallTypeHelper(out, round, count, (CallTypeDataInterface<ciKlass>)pdata);
}
}
if (parameters != null) {
for (int i = 0; i < parameters.numberOfParameters(); i++) {
count = dumpReplayDataTypeHelper(out, round, count, ParametersTypeData.typeIndex(i), parameters, parameters.type(i));
}
}
}
count = 0;
for (int round = 0; round < 2; round++) {
if (round == 1) out.print(" methods " + count);
count = dumpReplayDataExtraDataHelper(out, round, count);
}
out.println();
}
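The dumpReplayData code above (and its MethodData counterpart later in this commit) uses a two-round pattern: round 0 only counts the entries that will be emitted, round 1 prints the "oops <count>" header and then the entries themselves. A minimal standalone sketch of that pattern with made-up entry strings; this is an illustration, not SA code.

import java.io.PrintStream;

public class TwoRoundDumpDemo {
    // Mirrors the shape of dumpReplayDataTypeHelper: count in round 0, print in round 1.
    static int emit(PrintStream out, int round, int count, String entry) {
        if (round == 0) count++;
        else out.print(" " + entry);
        return count;
    }

    public static void main(String[] args) {
        String[] entries = { "java/lang/String", "java/util/ArrayList" }; // illustrative values
        int count = 0;
        for (int round = 0; round < 2; round++) {
            if (round == 1) System.out.print(" oops " + count);
            for (String e : entries) {
                count = emit(System.out, round, count, e);
            }
        }
        System.out.println(); // prints: " oops 2 java/lang/String java/util/ArrayList"
    }
}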

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,31 +22,35 @@
*
*/
package sun.jvm.hotspot.ci;
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class ciVirtualCallData extends VirtualCallData {
public ciVirtualCallData(DataLayout data) {
super(data);
public class ArgInfoData extends ArrayData {
public ArgInfoData(DataLayout layout) {
super(layout);
}
public Klass receiver(int row) {
throw new InternalError("should not call");
int numberOfArgs() {
return arrayLen();
}
public ciKlass receiverAt(int row) {
//assert((uint)row < rowLimit(), "oob");
ciMetadata recv = ciObjectFactory.getMetadata(addressAt(receiverCellIndex(row)));
if (recv != null && !(recv instanceof ciKlass)) {
System.err.println(recv);
int argModified(int arg) {
return arrayUintAt(arg);
}
public void printDataOn(PrintStream st) {
printShared(st, "ArgInfoData");
int nargs = numberOfArgs();
for (int i = 0; i < nargs; i++) {
st.print(" 0x" + Integer.toHexString(argModified(i)));
}
//assert(recv == NULL || recv->isKlass(), "wrong type");
return (ciKlass)recv;
st.println();
}
}

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// CallTypeData
//
// A CallTypeData is used to access profiling information about a non
// virtual call for which we collect type information about arguments
// and return value.
public class CallTypeData<K,M> extends CounterData implements CallTypeDataInterface<K> {
final TypeStackSlotEntries<K,M> args;
final ReturnTypeEntry<K,M> ret;
int cellCountGlobalOffset() {
return CounterData.staticCellCount() + TypeEntriesAtCall.cellCountLocalOffset();
}
int cellCountNoHeader() {
return uintAt(cellCountGlobalOffset());
}
public CallTypeData(MethodDataInterface<K,M> methodData, DataLayout layout) {
super(layout);
args = new TypeStackSlotEntries<K,M>(methodData, this, CounterData.staticCellCount()+TypeEntriesAtCall.headerCellCount(), numberOfArguments());
ret = new ReturnTypeEntry<K,M>(methodData, this, cellCount() - ReturnTypeEntry.staticCellCount());
}
static int staticCellCount() {
return -1;
}
public int cellCount() {
return CounterData.staticCellCount() +
TypeEntriesAtCall.headerCellCount() +
intAt(cellCountGlobalOffset());
}
public int numberOfArguments() {
return cellCountNoHeader() / TypeStackSlotEntries.perArgCount();
}
public boolean hasArguments() {
return cellCountNoHeader() >= TypeStackSlotEntries.perArgCount();
}
public K argumentType(int i) {
return args.type(i);
}
public boolean hasReturn() {
return (cellCountNoHeader() % TypeStackSlotEntries.perArgCount()) != 0;
}
public K returnType() {
return ret.type();
}
public int argumentTypeIndex(int i) {
return args.typeIndex(i);
}
public int returnTypeIndex() {
return ret.typeIndex();
}
public void printDataOn(PrintStream st) {
super.printDataOn(st);
if (hasArguments()) {
tab(st);
st.print("argument types");
args.printDataOn(st);
}
if (hasReturn()) {
tab(st);
st.print("return type");
ret.printDataOn(st);
}
}
}
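A minimal standalone sketch of the cell-count arithmetic behind numberOfArguments(), hasArguments() and hasReturn() above, assuming 2 cells per argument entry and 1 cell for an optional return entry (the TypeStackSlotEntries/ReturnTypeEntry sizes introduced elsewhere in this change); not SA code.

public class CallTypeCellsDemo {
    static final int PER_ARG_CELLS = 2; // stack slot cell + type cell
    static final int RETURN_CELLS  = 1; // type cell only

    // Cells after the TypeEntriesAtCall header for nbArgs arguments and an optional return type.
    static int cellsNoHeader(int nbArgs, boolean withReturn) {
        return nbArgs * PER_ARG_CELLS + (withReturn ? RETURN_CELLS : 0);
    }

    public static void main(String[] args) {
        int cells = cellsNoHeader(3, true);                                   // 7
        System.out.println("numberOfArguments = " + (cells / PER_ARG_CELLS)); // 3
        System.out.println("hasArguments = " + (cells >= PER_ARG_CELLS));     // true
        System.out.println("hasReturn = " + ((cells % PER_ARG_CELLS) != 0));  // true
    }
}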

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
public interface CallTypeDataInterface<K> {
int numberOfArguments();
boolean hasArguments();
K argumentType(int i);
boolean hasReturn();
K returnType();
int argumentTypeIndex(int i);
int returnTypeIndex();
}

View File

@ -41,6 +41,11 @@ public class DataLayout {
public static final int retDataTag = 6;
public static final int branchDataTag = 7;
public static final int multiBranchDataTag = 8;
public static final int argInfoDataTag = 9;
public static final int callTypeDataTag = 10;
public static final int virtualCallTypeDataTag = 11;
public static final int parametersTypeDataTag = 12;
public static final int speculativeTrapDataTag = 13;
// The _struct._flags word is formatted as [trapState:4 | flags:4].
// The trap state breaks down further as [recompile:1 | reason:3].
@ -61,8 +66,6 @@ public class DataLayout {
private int offset;
private boolean handlized;
public DataLayout(MethodData d, int o) {
data = d.getAddress();
offset = o;
@ -71,7 +74,6 @@ public class DataLayout {
public DataLayout(Address d, int o) {
data = d;
offset = o;
handlized = true;
}
public int dp() { return offset; }
@ -90,12 +92,7 @@ public class DataLayout {
}
public Address addressAt(int index) {
OopHandle handle;
if (handlized) {
return data.getAddressAt(offset + cellOffset(index));
} else {
return data.getOopHandleAt(offset + cellOffset(index));
}
return data.getAddressAt(offset + cellOffset(index));
}
// Every data layout begins with a header. This header
@ -128,7 +125,7 @@ public class DataLayout {
return 1;
}
static int computeSizeInBytes(int cellCount) {
static public int computeSizeInBytes(int cellCount) {
return headerSizeInBytes() + cellCount * MethodData.cellSize;
}

View File

@ -354,9 +354,7 @@ public class Method extends Metadata {
}
Klass holder = getMethodHolder();
out.println("ciMethod " +
holder.getName().asString() + " " +
OopUtilities.escapeString(getName().asString()) + " " +
getSignature().asString() + " " +
nameAsAscii() + " " +
getInvocationCount() + " " +
getBackedgeCount() + " " +
interpreterInvocationCount() + " " +
@ -371,4 +369,10 @@ public class Method extends Metadata {
public int interpreterInvocationCount() {
return getMethodCounters().interpreterInvocationCount();
}
public String nameAsAscii() {
return getMethodHolder().getName().asString() + " " +
OopUtilities.escapeString(getName().asString()) + " " +
getSignature().asString();
}
}
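The new nameAsAscii() helper above produces the "<holder> <escaped-name> <signature>" triple that the replay lines in this change now start with. A rough illustration of the resulting replay-line shape, using hypothetical values rather than data from a real replay file:

public class ReplayLineShapeDemo {
    public static void main(String[] args) {
        // nameAsAscii()-style triple: holder, escaped method name, signature (hypothetical values).
        String nameAsAscii = "java/lang/String" + " " + "hashCode" + " " + "()I";
        int invocationCount = 1000, backedgeCount = 0,
            interpreterInvocationCount = 1000, interpreterThrowoutCount = 0,
            instructionsSize = 0; // illustrative numbers only
        System.out.println("ciMethod " + nameAsAscii + " " +
                           invocationCount + " " + backedgeCount + " " +
                           interpreterInvocationCount + " " + interpreterThrowoutCount + " " +
                           instructionsSize);
    }
}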

View File

@ -33,7 +33,7 @@ import sun.jvm.hotspot.utilities.*;
// A MethodData provides interpreter profiling information
public class MethodData extends Metadata {
public class MethodData extends Metadata implements MethodDataInterface<Klass,Method> {
static int TypeProfileWidth = 2;
static int BciProfileWidth = 2;
static int CompileThreshold;
@ -152,6 +152,8 @@ public class MethodData extends Metadata {
dataSize = new CIntField(type.getCIntegerField("_data_size"), 0);
data = type.getAddressField("_data[0]");
parametersTypeDataDi = new CIntField(type.getCIntegerField("_parameters_type_data_di"), 0);
sizeofMethodDataOopDesc = (int)type.getSize();;
Reason_many = db.lookupIntConstant("Deoptimization::Reason_many").intValue();
@ -191,6 +193,22 @@ public class MethodData extends Metadata {
super(addr);
}
public Klass getKlassAtAddress(Address addr) {
return (Klass)Metadata.instantiateWrapperFor(addr);
}
public Method getMethodAtAddress(Address addr) {
return (Method)Metadata.instantiateWrapperFor(addr);
}
public void printKlassValueOn(Klass klass, PrintStream st) {
klass.printValueOn(st);
}
public void printMethodValueOn(Method method, PrintStream st) {
method.printValueOn(st);
}
public boolean isMethodData() { return true; }
private static long baseOffset;
@ -198,7 +216,7 @@ public class MethodData extends Metadata {
private static MetadataField method;
private static CIntField dataSize;
private static AddressField data;
private static CIntField parametersTypeDataDi;
public static int sizeofMethodDataOopDesc;
public static int cellSize;
@ -225,6 +243,27 @@ public class MethodData extends Metadata {
}
}
int sizeInBytes() {
if (size == null) {
return 0;
} else {
return (int)size.getValue(getAddress());
}
}
int size() {
return (int)Oop.alignObjectSize(VM.getVM().alignUp(sizeInBytes(), VM.getVM().getBytesPerWord())/VM.getVM().getBytesPerWord());
}
ParametersTypeData<Klass,Method> parametersTypeData() {
int di = (int)parametersTypeDataDi.getValue(getAddress());
if (di == -1) {
return null;
}
DataLayout dataLayout = new DataLayout(this, di + (int)data.getOffset());
return new ParametersTypeData<Klass,Method>(this, dataLayout);
}
boolean outOfBounds(int dataIndex) {
return dataIndex >= dataSize();
}
@ -246,15 +285,21 @@ public class MethodData extends Metadata {
case DataLayout.jumpDataTag:
return new JumpData(dataLayout);
case DataLayout.receiverTypeDataTag:
return new ReceiverTypeData(dataLayout);
return new ReceiverTypeData<Klass,Method>(this, dataLayout);
case DataLayout.virtualCallDataTag:
return new VirtualCallData(dataLayout);
return new VirtualCallData<Klass,Method>(this, dataLayout);
case DataLayout.retDataTag:
return new RetData(dataLayout);
case DataLayout.branchDataTag:
return new BranchData(dataLayout);
case DataLayout.multiBranchDataTag:
return new MultiBranchData(dataLayout);
case DataLayout.callTypeDataTag:
return new CallTypeData<Klass,Method>(this, dataLayout);
case DataLayout.virtualCallTypeDataTag:
return new VirtualCallTypeData<Klass,Method>(this, dataLayout);
case DataLayout.parametersTypeDataTag:
return new ParametersTypeData<Klass,Method>(this, dataLayout);
}
}
@ -272,7 +317,42 @@ public class MethodData extends Metadata {
}
boolean isValid(ProfileData current) { return current != null; }
DataLayout limitDataPosition() {
return new DataLayout(this, dataSize() + (int)data.getOffset());
}
DataLayout extraDataBase() {
return limitDataPosition();
}
DataLayout extraDataLimit() {
return new DataLayout(this, sizeInBytes());
}
static public int extraNbCells(DataLayout dataLayout) {
int nbCells = 0;
switch(dataLayout.tag()) {
case DataLayout.bitDataTag:
case DataLayout.noTag:
nbCells = BitData.staticCellCount();
break;
case DataLayout.speculativeTrapDataTag:
nbCells = SpeculativeTrapData.staticCellCount();
break;
default:
throw new InternalError("unexpected tag " + dataLayout.tag());
}
return nbCells;
}
DataLayout nextExtra(DataLayout dataLayout) {
return new DataLayout(this, dataLayout.dp() + DataLayout.computeSizeInBytes(extraNbCells(dataLayout)));
}
public void printDataOn(PrintStream st) {
if (parametersTypeData() != null) {
parametersTypeData().printDataOn(st);
}
ProfileData data = firstData();
for ( ; isValid(data); data = nextData(data)) {
st.print(dpToDi(data.dp()));
@ -280,6 +360,31 @@ public class MethodData extends Metadata {
// st->fillTo(6);
data.printDataOn(st);
}
st.println("--- Extra data:");
DataLayout dp = extraDataBase();
DataLayout end = extraDataLimit();
for (;; dp = nextExtra(dp)) {
switch(dp.tag()) {
case DataLayout.noTag:
continue;
case DataLayout.bitDataTag:
data = new BitData(dp);
break;
case DataLayout.speculativeTrapDataTag:
data = new SpeculativeTrapData<Klass,Method>(this, dp);
break;
case DataLayout.argInfoDataTag:
data = new ArgInfoData(dp);
dp = end; // ArgInfoData is at the end of extra data section.
break;
default:
throw new InternalError("unexpected tag " + dp.tag());
}
st.print(dpToDi(data.dp()));
st.print(" ");
data.printDataOn(st);
if (dp == end) return;
}
}
private byte[] fetchDataAt(Address base, long offset, long size) {
@ -332,14 +437,71 @@ public class MethodData extends Metadata {
return 20000;
}
int dumpReplayDataTypeHelper(PrintStream out, int round, int count, int index, ProfileData pdata, Klass k) {
if (k != null) {
if (round == 0) count++;
else out.print(" " +
(dpToDi(pdata.dp() +
pdata.cellOffset(index)) / cellSize) + " " +
k.getName().asString());
}
return count;
}
int dumpReplayDataReceiverTypeHelper(PrintStream out, int round, int count, ReceiverTypeData<Klass,Method> vdata) {
for (int i = 0; i < vdata.rowLimit(); i++) {
Klass k = vdata.receiver(i);
count = dumpReplayDataTypeHelper(out, round, count, vdata.receiverCellIndex(i), vdata, k);
}
return count;
}
int dumpReplayDataCallTypeHelper(PrintStream out, int round, int count, CallTypeDataInterface<Klass> callTypeData) {
if (callTypeData.hasArguments()) {
for (int i = 0; i < callTypeData.numberOfArguments(); i++) {
count = dumpReplayDataTypeHelper(out, round, count, callTypeData.argumentTypeIndex(i), (ProfileData)callTypeData, callTypeData.argumentType(i));
}
}
if (callTypeData.hasReturn()) {
count = dumpReplayDataTypeHelper(out, round, count, callTypeData.returnTypeIndex(), (ProfileData)callTypeData, callTypeData.returnType());
}
return count;
}
int dumpReplayDataExtraDataHelper(PrintStream out, int round, int count) {
DataLayout dp = extraDataBase();
DataLayout end = extraDataLimit();
for (;dp != end; dp = nextExtra(dp)) {
switch(dp.tag()) {
case DataLayout.noTag:
case DataLayout.argInfoDataTag:
return count;
case DataLayout.bitDataTag:
break;
case DataLayout.speculativeTrapDataTag: {
SpeculativeTrapData<Klass,Method> data = new SpeculativeTrapData<Klass,Method>(this, dp);
Method m = data.method();
if (m != null) {
if (round == 0) {
count++;
} else {
out.print(" " + (dpToDi(data.dp() + data.cellOffset(SpeculativeTrapData.methodIndex())) / cellSize) + " " + m.nameAsAscii());
}
}
break;
}
default:
throw new InternalError("bad tag " + dp.tag());
}
}
return count;
}
public void dumpReplayData(PrintStream out) {
Method method = getMethod();
Klass holder = method.getMethodHolder();
out.print("ciMethodData " +
holder.getName().asString() + " " +
OopUtilities.escapeString(method.getName().asString()) + " " +
method.getSignature().asString() + " " +
"2" + " " +
out.print("ciMethodData " + method.nameAsAscii()
+ " " + "2" + " " +
currentMileage());
byte[] orig = orig();
out.print(" orig " + orig.length);
@ -353,36 +515,28 @@ public class MethodData extends Metadata {
out.print(" 0x" + Long.toHexString(data[i]));
}
int count = 0;
ParametersTypeData<Klass,Method> parameters = parametersTypeData();
for (int round = 0; round < 2; round++) {
if (round == 1) out.print(" oops " + count);
ProfileData pdata = firstData();
for ( ; isValid(pdata); pdata = nextData(pdata)) {
if (pdata instanceof ReceiverTypeData) {
ReceiverTypeData vdata = (ReceiverTypeData)pdata;
for (int i = 0; i < vdata.rowLimit(); i++) {
Klass k = vdata.receiver(i);
if (k != null) {
if (round == 0) count++;
else out.print(" " +
(dpToDi(vdata.dp() +
vdata.cellOffset(vdata.receiverCellIndex(i))) / cellSize) + " " +
k.getName().asString());
}
}
} else if (pdata instanceof VirtualCallData) {
VirtualCallData vdata = (VirtualCallData)pdata;
for (int i = 0; i < vdata.rowLimit(); i++) {
Klass k = vdata.receiver(i);
if (k != null) {
if (round == 0) count++;
else out.print(" " +
(dpToDi(vdata.dp() +
vdata.cellOffset(vdata.receiverCellIndex(i))) / cellSize) + " " +
k.getName().asString());
}
}
count = dumpReplayDataReceiverTypeHelper(out, round, count, (ReceiverTypeData<Klass,Method>)pdata);
}
if (pdata instanceof CallTypeDataInterface) {
count = dumpReplayDataCallTypeHelper(out, round, count, (CallTypeDataInterface<Klass>)pdata);
}
}
if (parameters != null) {
for (int i = 0; i < parameters.numberOfParameters(); i++) {
count = dumpReplayDataTypeHelper(out, round, count, ParametersTypeData.typeIndex(i), parameters, parameters.type(i));
}
}
}
count = 0;
for (int round = 0; round < 2; round++) {
if (round == 1) out.print(" methods " + count);
count = dumpReplayDataExtraDataHelper(out, round, count);
}
out.println();
}
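A standalone sketch of the extra-data walk in printDataOn above: records are visited in order, empty (noTag) slots are skipped, and the ArgInfoData record terminates the section. The argInfoDataTag and speculativeTrapDataTag values follow the DataLayout constants shown earlier; noTag and bitDataTag are assumed here to be 0 and 1. An illustration, not SA code.

public class ExtraDataWalkDemo {
    static final int NO_TAG = 0, BIT_DATA_TAG = 1,          // assumed values
                     ARG_INFO_TAG = 9, SPECULATIVE_TRAP_TAG = 13;

    public static void main(String[] args) {
        int[] tags = { NO_TAG, SPECULATIVE_TRAP_TAG, BIT_DATA_TAG, NO_TAG, ARG_INFO_TAG };
        for (int i = 0; i < tags.length; i++) {
            switch (tags[i]) {
            case NO_TAG:
                continue;                                        // empty slot, skip it
            case BIT_DATA_TAG:
                System.out.println(i + " BitData");              break;
            case SPECULATIVE_TRAP_TAG:
                System.out.println(i + " SpeculativeTrapData");  break;
            case ARG_INFO_TAG:
                System.out.println(i + " ArgInfoData");          // always the last record
                return;
            default:
                throw new InternalError("unexpected tag " + tags[i]);
            }
        }
    }
}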

View File

@ -0,0 +1,39 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public interface MethodDataInterface<K, M> {
K getKlassAtAddress(Address addr);
M getMethodAtAddress(Address addr);
void printKlassValueOn(K klass, PrintStream st);
void printMethodValueOn(M method, PrintStream st);
}
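MethodDataInterface is the pivot of this refactoring: the ProfileData subclasses become generic over <K,M> and resolve klass/method addresses through this interface, so the same classes serve MethodData (Klass/Method) and ciMethodData (ciKlass/ciMethod). A standalone sketch of that pattern, simplified to the klass side and using stand-in types and hypothetical names; not SA code.

import java.io.PrintStream;

// Stand-in for MethodDataInterface<K,M>: how wrapper objects are obtained and printed.
interface WrapperSource<K> {
    K klassAt(long addr);
    void printKlassValueOn(K klass, PrintStream st);
}

// Stand-in for a ReceiverTypeData-like consumer: it never names the concrete wrapper type.
final class ReceiverRow<K> {
    private final WrapperSource<K> source;
    private final long receiverAddr;
    ReceiverRow(WrapperSource<K> source, long receiverAddr) {
        this.source = source;
        this.receiverAddr = receiverAddr;
    }
    K receiver() { return source.klassAt(receiverAddr); }
    void printOn(PrintStream st) { source.printKlassValueOn(receiver(), st); }
}

public class WrapperSourceDemo {
    public static void main(String[] args) {
        // One implementation returns plain strings; the SA versions return Klass or ciKlass.
        WrapperSource<String> strings = new WrapperSource<String>() {
            public String klassAt(long addr) { return "Klass@0x" + Long.toHexString(addr); }
            public void printKlassValueOn(String klass, PrintStream st) { st.println(klass); }
        };
        new ReceiverRow<String>(strings, 0x1000).printOn(System.out);
    }
}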

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// ParametersTypeData
//
// A ParametersTypeData is used to access profiling information about
// types of parameters to a method
public class ParametersTypeData<K,M> extends ArrayData {
final TypeStackSlotEntries<K,M> parameters;
static int stackSlotLocalOffset(int i) {
return arrayStartOffSet + TypeStackSlotEntries.stackSlotLocalOffset(i);
}
static int typeLocalOffset(int i) {
return arrayStartOffSet + TypeStackSlotEntries.typeLocalOffset(i);
}
public ParametersTypeData(MethodDataInterface<K,M> methodData, DataLayout layout) {
super(layout);
parameters = new TypeStackSlotEntries<K,M>(methodData, this, 1, numberOfParameters());
}
public int numberOfParameters() {
return arrayLen() / TypeStackSlotEntries.perArgCount();
}
int stackSlot(int i) {
return parameters.stackSlot(i);
}
public K type(int i) {
return parameters.type(i);
}
static public int typeIndex(int i) {
return typeLocalOffset(i);
}
public void printDataOn(PrintStream st) {
st.print("parameter types");
parameters.printDataOn(st);
}
}

View File

@ -37,13 +37,15 @@ import sun.jvm.hotspot.utilities.*;
// dynamic type check. It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass, count) pairs
// which are used to store a type profile for the receiver of the check.
public class ReceiverTypeData extends CounterData {
public class ReceiverTypeData<K,M> extends CounterData {
static final int receiver0Offset = counterCellCount;
static final int count0Offset = receiver0Offset + 1;
static final int receiverTypeRowCellCount = (count0Offset + 1) - receiver0Offset;
final MethodDataInterface<K,M> methodData;
public ReceiverTypeData(DataLayout layout) {
public ReceiverTypeData(MethodDataInterface<K,M> methodData, DataLayout layout) {
super(layout);
this.methodData = methodData;
//assert(layout.tag() == DataLayout.receiverTypeDataTag ||
// layout.tag() == DataLayout.virtualCallDataTag, "wrong type");
}
@ -73,14 +75,14 @@ public class ReceiverTypeData extends CounterData {
// gc; it does not assert the receiver is a klass. During compaction of the
// perm gen, the klass may already have moved, so the isKlass() predicate
// would fail. The 'normal' version should be used whenever possible.
Klass receiverUnchecked(int row) {
K receiverUnchecked(int row) {
//assert(row < rowLimit(), "oob");
Address recv = addressAt(receiverCellIndex(row));
return (Klass)Metadata.instantiateWrapperFor(recv);
return methodData.getKlassAtAddress(recv);
}
public Klass receiver(int row) {
Klass recv = receiverUnchecked(row);
public K receiver(int row) {
K recv = receiverUnchecked(row);
//assert(recv == NULL || ((oop)recv).isKlass(), "wrong type");
return recv;
}
@ -111,7 +113,7 @@ public class ReceiverTypeData extends CounterData {
for (row = 0; row < rowLimit(); row++) {
if (receiver(row) != null) {
tab(st);
receiver(row).printValueOn(st);
methodData.printKlassValueOn(receiver(row), st);
st.println("(" + receiverCount(row) + ")");
}
}
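A standalone sketch of the (receiver, count) row layout described in the comment above, using the two-cells-per-row arrangement implied by receiver0Offset/count0Offset; counterCellCount is taken as 1 here purely for illustration, and the index helpers are illustrative rather than copied from SA.

public class ReceiverRowLayoutDemo {
    static final int COUNTER_CELLS = 1;                         // assumption for the sketch
    static final int RECEIVER0     = COUNTER_CELLS;             // receiver0Offset
    static final int COUNT0        = RECEIVER0 + 1;             // count0Offset
    static final int ROW_CELLS     = (COUNT0 + 1) - RECEIVER0;  // receiverTypeRowCellCount == 2

    static int receiverCellIndex(int row)      { return RECEIVER0 + row * ROW_CELLS; }
    static int receiverCountCellIndex(int row) { return COUNT0 + row * ROW_CELLS; }

    public static void main(String[] args) {
        for (int row = 0; row < 2; row++) {   // TypeProfileWidth defaults to 2 rows
            System.out.println("row " + row + ": receiver cell " + receiverCellIndex(row)
                               + ", count cell " + receiverCountCellIndex(row));
        }
    }
}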

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,32 +22,39 @@
*
*/
package sun.jvm.hotspot.ci;
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class ciReceiverTypeData extends ReceiverTypeData {
public ciReceiverTypeData(DataLayout data) {
super(data);
// Type entry used for return from a call. A single cell to record the
// type.
public class ReturnTypeEntry<K,M> extends TypeEntries<K,M> {
static final int cellCount = 1;
ReturnTypeEntry(MethodDataInterface<K,M> methodData, ProfileData pd, int baseOff) {
super(methodData, pd, baseOff);
}
public Klass receiver(int row) {
throw new InternalError("should not call");
K type() {
return validKlass(baseOff);
}
public ciKlass receiverAt(int row) {
//assert((uint)row < rowLimit(), "oob");
ciMetadata recv = ciObjectFactory.getMetadata(addressAt(receiverCellIndex(row)));
if (recv != null && !(recv instanceof ciKlass)) {
System.err.println(recv);
}
//assert(recv == NULL || recv->isKlass(), "wrong type");
return (ciKlass)recv;
static int staticCellCount() {
return cellCount;
}
int typeIndex() {
return baseOff;
}
void printDataOn(PrintStream st) {
pd.tab(st);
printKlass(st, baseOff);
st.println();
}
}

View File

@ -0,0 +1,70 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// SpeculativeTrapData
//
// A SpeculativeTrapData is used to record traps due to type
// speculation. It records the root of the compilation.
public class SpeculativeTrapData<K, M> extends ProfileData {
static final int speculativeTrapMethod = 0;
static final int speculativeTrapCellCount = 1;
final MethodDataInterface<K, M> methodData;
public SpeculativeTrapData(MethodDataInterface<K,M> methodData, DataLayout layout) {
super(layout);
this.methodData = methodData;
}
static int staticCellCount() {
return speculativeTrapCellCount;
}
public int cellCount() {
return staticCellCount();
}
public M method() {
return methodData.getMethodAtAddress(addressAt(speculativeTrapMethod));
}
static public int methodIndex() {
return speculativeTrapMethod;
}
public void printDataOn(PrintStream st) {
printShared(st, "SpeculativeTrapData");
tab(st);
methodData.printMethodValueOn(method(), st);
st.println();
}
}

View File

@ -0,0 +1,97 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// Entries in a ProfileData object to record types: it can either be
// none (no profile), unknown (conflicting profile data) or a klass if
// a single one is seen. Whether a null reference was seen is also
// recorded. No counter is associated with the type and a single type
// is tracked (unlike VirtualCallData).
public abstract class TypeEntries<K,M> {
static final int nullSeen = 1;
static final int typeMask = ~nullSeen;
static final int typeUnknown = 2;
static final int statusBits = nullSeen | typeUnknown;
static final int typeKlassMask = ~statusBits;
final ProfileData pd;
final int baseOff;
final MethodDataInterface<K,M> methodData;
boolean wasNullSeen(int index) {
int v = pd.intptrAt(index);
return (v & nullSeen) != 0;
}
boolean isTypeUnknown(int index) {
int v = pd.intptrAt(index);
return (v & typeUnknown) != 0;
}
boolean isTypeNone(int index) {
int v = pd.intptrAt(index);
return (v & typeMask) == 0;
}
K validKlass(int index) {
if (!isTypeNone(index) &&
!isTypeUnknown(index)) {
return methodData.getKlassAtAddress(pd.addressAt(index).andWithMask(typeKlassMask));
} else {
return null;
}
}
void printKlass(PrintStream st, int index) {
if (isTypeNone(index)) {
st.print("none");
} else if (isTypeUnknown(index)) {
st.print("unknown");
} else {
methodData.printKlassValueOn(validKlass(index), st);
}
if (wasNullSeen(index)) {
st.print(" (null seen)");
}
}
TypeEntries(MethodDataInterface<K,M> methodData, ProfileData pd, int baseOff) {
this.pd = pd;
this.baseOff = baseOff;
this.methodData = methodData;
}
long intptrAt(int index) {
return pd.intptrAt(index);
}
}
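A standalone sketch of the flag decoding done by wasNullSeen/isTypeUnknown/isTypeNone/validKlass above: the low bits of a profiled entry carry the nullSeen and typeUnknown flags, and the remaining bits hold the klass pointer. The entry value below is made up for illustration.

public class TypeEntryBitsDemo {
    static final long NULL_SEEN       = 1;
    static final long TYPE_UNKNOWN    = 2;
    static final long TYPE_MASK       = ~NULL_SEEN;
    static final long TYPE_KLASS_MASK = ~(NULL_SEEN | TYPE_UNKNOWN);

    public static void main(String[] args) {
        long entry = 0x7f0012345000L | NULL_SEEN;   // hypothetical profiled value
        System.out.println("wasNullSeen   = " + ((entry & NULL_SEEN) != 0));
        System.out.println("isTypeUnknown = " + ((entry & TYPE_UNKNOWN) != 0));
        System.out.println("isTypeNone    = " + ((entry & TYPE_MASK) == 0));
        System.out.println("klass address = 0x" + Long.toHexString(entry & TYPE_KLASS_MASK));
    }
}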

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// Entries to collect type information at a call: contains arguments
// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
// number of cells.
public abstract class TypeEntriesAtCall {
static int stackSlotLocalOffset(int i) {
return headerCellCount() + TypeStackSlotEntries.stackSlotLocalOffset(i);
}
static int argumentTypeLocalOffset(int i) {
return headerCellCount() + TypeStackSlotEntries.typeLocalOffset(i);
}
static int headerCellCount() {
return 1;
}
static int cellCountLocalOffset() {
return 0;
}
}

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// Type entries used for arguments passed at a call and parameters on
// method entry. 2 cells per entry: one for the type encoded as in
// TypeEntries and one initialized with the stack slot where the
// profiled object is to be found so that the interpreter can locate
// it quickly.
public class TypeStackSlotEntries<K,M> extends TypeEntries<K,M> {
static final int stackSlotEntry = 0;
static final int typeEntry = 1;
static final int perArgCellCount = 2;
int stackSlotOffset(int i) {
return baseOff + stackSlotLocalOffset(i);
}
final int numberOfEntries;
int typeOffsetInCells(int i) {
return baseOff + typeLocalOffset(i);
}
TypeStackSlotEntries(MethodDataInterface<K,M> methodData, ProfileData pd, int baseOff, int nbEntries) {
super(methodData, pd, baseOff);
numberOfEntries = nbEntries;
}
static int stackSlotLocalOffset(int i) {
return i * perArgCellCount + stackSlotEntry;
}
static int typeLocalOffset(int i) {
return i * perArgCellCount + typeEntry;
}
int stackSlot(int i) {
return pd.uintAt(stackSlotOffset(i));
}
K type(int i) {
return validKlass(typeOffsetInCells(i));
}
static int perArgCount() {
return perArgCellCount;
}
int typeIndex(int i) {
return typeOffsetInCells(i);
}
void printDataOn(PrintStream st) {
for (int i = 0; i < numberOfEntries; i++) {
pd.tab(st);
st.print(i + ": stack(" + stackSlot(i)+ ") ");
printKlass(st, typeOffsetInCells(i));
st.println();
}
}
}
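A standalone sketch of the per-entry layout described in the comment above: two cells per profiled value, cell 0 for the stack slot and cell 1 for the encoded type, matching stackSlotLocalOffset() and typeLocalOffset(). An illustration, not SA code.

public class StackSlotLayoutDemo {
    static final int PER_ARG_CELLS   = 2;
    static final int STACK_SLOT_CELL = 0;
    static final int TYPE_CELL       = 1;

    static int stackSlotLocalOffset(int i) { return i * PER_ARG_CELLS + STACK_SLOT_CELL; }
    static int typeLocalOffset(int i)      { return i * PER_ARG_CELLS + TYPE_CELL; }

    public static void main(String[] args) {
        for (int i = 0; i < 3; i++) {
            System.out.println("entry " + i + ": stack slot cell " + stackSlotLocalOffset(i)
                               + ", type cell " + typeLocalOffset(i));
        }
    }
}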

View File

@ -35,9 +35,9 @@ import sun.jvm.hotspot.utilities.*;
//
// A VirtualCallData is used to access profiling information about a
// call. For now, it has nothing more than a ReceiverTypeData.
public class VirtualCallData extends ReceiverTypeData {
public VirtualCallData(DataLayout layout) {
super(layout);
public class VirtualCallData<K,M> extends ReceiverTypeData<K,M> {
public VirtualCallData(MethodDataInterface<K,M> methodData, DataLayout layout) {
super(methodData, layout);
//assert(layout.tag() == DataLayout.virtualCallDataTag, "wrong type");
}

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
// VirtualCallTypeData
//
// A VirtualCallTypeData is used to access profiling information about
// a virtual call for which we collect type information about
// arguments and return value.
public class VirtualCallTypeData<K,M> extends VirtualCallData implements CallTypeDataInterface<K> {
final TypeStackSlotEntries<K,M> args;
final ReturnTypeEntry<K,M> ret;
int cellCountGlobalOffset() {
return VirtualCallData.staticCellCount() + TypeEntriesAtCall.cellCountLocalOffset();
}
int cellCountNoHeader() {
return uintAt(cellCountGlobalOffset());
}
public VirtualCallTypeData(MethodDataInterface<K,M> methodData, DataLayout layout) {
super(methodData, layout);
args = new TypeStackSlotEntries<K,M>(methodData, this, VirtualCallData.staticCellCount()+TypeEntriesAtCall.headerCellCount(), numberOfArguments());
ret = new ReturnTypeEntry<K,M>(methodData, this, cellCount() - ReturnTypeEntry.staticCellCount());
}
static int staticCellCount() {
return -1;
}
public int cellCount() {
return VirtualCallData.staticCellCount() +
TypeEntriesAtCall.headerCellCount() +
intAt(cellCountGlobalOffset());
}
public int numberOfArguments() {
return cellCountNoHeader() / TypeStackSlotEntries.perArgCount();
}
public boolean hasArguments() {
return cellCountNoHeader() >= TypeStackSlotEntries.perArgCount();
}
public K argumentType(int i) {
return args.type(i);
}
public boolean hasReturn() {
return (cellCountNoHeader() % TypeStackSlotEntries.perArgCount()) != 0;
}
public K returnType() {
return ret.type();
}
public int argumentTypeIndex(int i) {
return args.typeIndex(i);
}
public int returnTypeIndex() {
return ret.typeIndex();
}
public void printDataOn(PrintStream st) {
super.printDataOn(st);
if (hasArguments()) {
tab(st);
st.print("argument types");
args.printDataOn(st);
}
if (hasReturn()) {
tab(st);
st.print("return type");
ret.printDataOn(st);
}
}
};

View File

@ -77,30 +77,40 @@ ifeq ($(INCLUDE_ALL_GCS), false)
CXXFLAGS += -DINCLUDE_ALL_GCS=0
CFLAGS += -DINCLUDE_ALL_GCS=0
Src_Files_EXCLUDE += \
cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \
concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \
collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \
concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1StringDedup.cpp g1StringDedupStat.cpp \
g1StringDedupTable.cpp g1StringDedupThread.cpp g1StringDedupQueue.cpp g1_globals.cpp heapRegion.cpp \
g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp g1CodeCacheRemSet.cpp \
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \
psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \
psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \
psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp
gc_impl := $(GAMMADIR)/src/share/vm/gc_implementation
gc_exclude := \
$(notdir $(wildcard $(gc_impl)/concurrentMarkSweep/*.cpp)) \
$(notdir $(wildcard $(gc_impl)/g1/*.cpp)) \
$(notdir $(wildcard $(gc_impl)/parallelScavenge/*.cpp)) \
$(notdir $(wildcard $(gc_impl)/parNew/*.cpp))
Src_Files_EXCLUDE += $(gc_exclude)
# Exclude everything in $(gc_impl)/shared except the files listed
# in $(gc_shared_keep).
gc_shared_all := $(notdir $(wildcard $(gc_impl)/shared/*.cpp))
gc_shared_keep := \
adaptiveSizePolicy.cpp \
ageTable.cpp \
collectorCounters.cpp \
cSpaceCounters.cpp \
gcPolicyCounters.cpp \
gcStats.cpp \
gcTimer.cpp \
gcTrace.cpp \
gcTraceSend.cpp \
gcTraceTime.cpp \
gcUtil.cpp \
generationCounters.cpp \
markSweep.cpp \
objectCountEventSender.cpp \
spaceDecorator.cpp \
vmGCOperations.cpp
Src_Files_EXCLUDE += $(filter-out $(gc_shared_keep),$(gc_shared_all))
# src/share/vm/services
Src_Files_EXCLUDE += \
g1MemoryPool.cpp \
psMemoryPool.cpp
endif
ifeq ($(INCLUDE_NMT), false)

View File

@ -3653,9 +3653,9 @@ class StubGenerator: public StubCodeGenerator {
const Register len_reg = I4; // cipher length
const Register keylen = I5; // reg for storing expanded key array length
// save cipher len before save_frame, to return in the end
__ mov(O4, L0);
__ save_frame(0);
// save cipher len to return in the end
__ mov(len_reg, L0);
// read expanded key length
__ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
@ -3778,9 +3778,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stf(FloatRegisterImpl::D, F60, rvec, 0);
__ stf(FloatRegisterImpl::D, F62, rvec, 8);
__ restore();
__ retl();
__ delayed()->mov(L0, O0);
__ mov(L0, I0);
__ ret();
__ delayed()->restore();
__ align(OptoLoopAlignment);
__ BIND(L_cbcenc192);
@ -3869,9 +3869,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stf(FloatRegisterImpl::D, F60, rvec, 0);
__ stf(FloatRegisterImpl::D, F62, rvec, 8);
__ restore();
__ retl();
__ delayed()->mov(L0, O0);
__ mov(L0, I0);
__ ret();
__ delayed()->restore();
__ align(OptoLoopAlignment);
__ BIND(L_cbcenc256);
@ -3962,9 +3962,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stf(FloatRegisterImpl::D, F60, rvec, 0);
__ stf(FloatRegisterImpl::D, F62, rvec, 8);
__ restore();
__ retl();
__ delayed()->mov(L0, O0);
__ mov(L0, I0);
__ ret();
__ delayed()->restore();
return start;
}
@ -3992,9 +3992,9 @@ class StubGenerator: public StubCodeGenerator {
const Register original_key = I5; // original key array only required during decryption
const Register keylen = L6; // reg for storing expanded key array length
// save cipher len before save_frame, to return in the end
__ mov(O4, L0);
__ save_frame(0); //args are read from I* registers since we save the frame in the beginning
// save cipher len to return in the end
__ mov(len_reg, L7);
// load original key from SunJCE expanded decryption key
// Since we load original key buffer starting first element, 8-byte alignment is guaranteed
@ -4568,10 +4568,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stx(L0, rvec, 0);
__ stx(L1, rvec, 8);
__ restore();
__ mov(L0, O0);
__ retl();
__ delayed()->nop();
__ mov(L7, I0);
__ ret();
__ delayed()->restore();
return start;
}

View File

@ -26,12 +26,9 @@
#ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
#define OS_AIX_VM_THREAD_AIX_INLINE_HPP
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadLocalStorage.hpp"
#include "prefetch_aix_ppc.inline.hpp"
// Contains inlined functions for class Thread and ThreadLocalStorage
inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do

View File

@ -31,12 +31,6 @@
#include "runtime/thread.hpp"
#include "runtime/threadLocalStorage.hpp"
#ifdef TARGET_OS_ARCH_bsd_x86
# include "prefetch_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "prefetch_bsd_zero.inline.hpp"
#endif
// Contains inlined functions for class Thread and ThreadLocalStorage

View File

@ -29,24 +29,8 @@
#error "This file should only be included from thread.inline.hpp"
#endif
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadLocalStorage.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "prefetch_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "prefetch_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "prefetch_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "prefetch_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "prefetch_linux_ppc.inline.hpp"
#endif
// Contains inlined functions for class Thread and ThreadLocalStorage

View File

@ -30,15 +30,8 @@
#endif
#include "runtime/atomic.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadLocalStorage.hpp"
#ifdef TARGET_OS_ARCH_solaris_x86
# include "prefetch_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "prefetch_solaris_sparc.inline.hpp"
#endif
// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of
// startup.

View File

@ -5005,7 +5005,11 @@ bool os::check_heap(bool force) {
// wrong; at these points, eax contains the address of the offending block (I think).
// To get to the explicit error message(s) below, just continue twice.
HANDLE heap = GetProcessHeap();
{ HeapLock(heap);
// If we fail to lock the heap, then gflags.exe has been used
// or some other special heap flag has been set that prevents
// locking. We don't try to walk a heap we can't lock.
if (HeapLock(heap) != 0) {
PROCESS_HEAP_ENTRY phe;
phe.lpData = NULL;
while (HeapWalk(heap, &phe) != 0) {
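As an aside, here is a minimal standalone sketch of the guarded walk the new code performs; the corruption checks and error reporting of os::check_heap are omitted, and only documented Win32 calls (GetProcessHeap, HeapLock, HeapWalk, HeapUnlock) are used.
#include <windows.h>
// Walk the process heap only if it can be locked; gflags.exe or other heap
// debugging flags can make HeapLock fail, in which case we skip the walk.
static void walk_process_heap() {
  HANDLE heap = GetProcessHeap();
  if (HeapLock(heap) != 0) {
    PROCESS_HEAP_ENTRY phe;
    phe.lpData = NULL;                 // start the walk at the first entry
    while (HeapWalk(heap, &phe) != 0) {
      // inspect phe.lpData / phe.cbData here
    }
    HeapUnlock(heap);                  // always release the heap lock
  }
}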

View File

@ -29,12 +29,8 @@
#error "This file should only be included from thread.inline.hpp"
#endif
#include "runtime/prefetch.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadLocalStorage.hpp"
#ifdef TARGET_OS_ARCH_windows_x86
# include "prefetch_windows_x86.inline.hpp"
#endif
// Contains inlined functions for class Thread and ThreadLocalStorage

View File

@ -158,6 +158,9 @@ void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) {
void BCEscapeAnalyzer::set_method_escape(ArgumentMap vars) {
clear_bits(vars, _arg_local);
if (vars.contains_allocated()) {
_allocated_escapes = true;
}
}
void BCEscapeAnalyzer::set_global_escape(ArgumentMap vars, bool merge) {

View File

@ -177,7 +177,7 @@ void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
for (int i = 0; i < _number_of_entries; i++) {
for (int i = 0; i < number_of_entries(); i++) {
intptr_t k = entries->type(i);
TypeStackSlotEntries::set_type(i, translate_klass(k));
}
@ -242,7 +242,6 @@ ciProfileData* ciMethodData::next_data(ciProfileData* current) {
}
ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots) {
// bci_to_extra_data(bci) ...
DataLayout* dp = data_layout_at(data_size());
DataLayout* end = data_layout_at(data_size() + extra_data_size());
two_free_slots = false;
@ -506,6 +505,63 @@ void ciMethodData::print_impl(outputStream* st) {
ciMetadata::print_impl(st);
}
void ciMethodData::dump_replay_data_type_helper(outputStream* out, int round, int& count, ProfileData* pdata, ByteSize offset, ciKlass* k) {
if (k != NULL) {
if (round == 0) {
count++;
} else {
out->print(" %d %s", (int)(dp_to_di(pdata->dp() + in_bytes(offset)) / sizeof(intptr_t)), k->name()->as_quoted_ascii());
}
}
}
template<class T> void ciMethodData::dump_replay_data_receiver_type_helper(outputStream* out, int round, int& count, T* vdata) {
for (uint i = 0; i < vdata->row_limit(); i++) {
dump_replay_data_type_helper(out, round, count, vdata, vdata->receiver_offset(i), vdata->receiver(i));
}
}
template<class T> void ciMethodData::dump_replay_data_call_type_helper(outputStream* out, int round, int& count, T* call_type_data) {
if (call_type_data->has_arguments()) {
for (int i = 0; i < call_type_data->number_of_arguments(); i++) {
dump_replay_data_type_helper(out, round, count, call_type_data, call_type_data->argument_type_offset(i), call_type_data->valid_argument_type(i));
}
}
if (call_type_data->has_return()) {
dump_replay_data_type_helper(out, round, count, call_type_data, call_type_data->return_type_offset(), call_type_data->valid_return_type());
}
}
void ciMethodData::dump_replay_data_extra_data_helper(outputStream* out, int round, int& count) {
DataLayout* dp = data_layout_at(data_size());
DataLayout* end = data_layout_at(data_size() + extra_data_size());
for (;dp < end; dp = MethodData::next_extra(dp)) {
switch(dp->tag()) {
case DataLayout::no_tag:
case DataLayout::arg_info_data_tag:
return;
case DataLayout::bit_data_tag:
break;
case DataLayout::speculative_trap_data_tag: {
ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
ciMethod* m = data->method();
if (m != NULL) {
if (round == 0) {
count++;
} else {
out->print(" %d ", (int)(dp_to_di(((address)dp) + in_bytes(ciSpeculativeTrapData::method_offset())) / sizeof(intptr_t)));
m->dump_name_as_ascii(out);
}
}
break;
}
default:
fatal(err_msg("bad tag = %d", dp->tag()));
}
}
}
void ciMethodData::dump_replay_data(outputStream* out) {
ResourceMark rm;
MethodData* mdo = get_MethodData();
@ -527,7 +583,7 @@ void ciMethodData::dump_replay_data(outputStream* out) {
}
// dump the MDO data as raw data
int elements = data_size() / sizeof(intptr_t);
int elements = (data_size() + extra_data_size()) / sizeof(intptr_t);
out->print(" data %d", elements);
for (int i = 0; i < elements; i++) {
// We could use INTPTR_FORMAT here but that's a zero justified
@ -544,37 +600,35 @@ void ciMethodData::dump_replay_data(outputStream* out) {
// and emit pairs of offset and klass name so that they can be
// reconstructed at runtime. The first round counts the number of
// oop references and the second actually emits them.
int count = 0;
for (int round = 0; round < 2; round++) {
ciParametersTypeData* parameters = parameters_type_data();
for (int count = 0, round = 0; round < 2; round++) {
if (round == 1) out->print(" oops %d", count);
ProfileData* pdata = first_data();
for ( ; is_valid(pdata); pdata = next_data(pdata)) {
if (pdata->is_ReceiverTypeData()) {
ciReceiverTypeData* vdata = (ciReceiverTypeData*)pdata;
for (uint i = 0; i < vdata->row_limit(); i++) {
ciKlass* k = vdata->receiver(i);
if (k != NULL) {
if (round == 0) {
count++;
} else {
out->print(" %d %s", (int)(dp_to_di(vdata->dp() + in_bytes(vdata->receiver_offset(i))) / sizeof(intptr_t)), k->name()->as_quoted_ascii());
}
}
}
} else if (pdata->is_VirtualCallData()) {
if (pdata->is_VirtualCallData()) {
ciVirtualCallData* vdata = (ciVirtualCallData*)pdata;
for (uint i = 0; i < vdata->row_limit(); i++) {
ciKlass* k = vdata->receiver(i);
if (k != NULL) {
if (round == 0) {
count++;
} else {
out->print(" %d %s", (int)(dp_to_di(vdata->dp() + in_bytes(vdata->receiver_offset(i))) / sizeof(intptr_t)), k->name()->as_quoted_ascii());
}
}
dump_replay_data_receiver_type_helper<ciVirtualCallData>(out, round, count, vdata);
if (pdata->is_VirtualCallTypeData()) {
ciVirtualCallTypeData* call_type_data = (ciVirtualCallTypeData*)pdata;
dump_replay_data_call_type_helper<ciVirtualCallTypeData>(out, round, count, call_type_data);
}
} else if (pdata->is_ReceiverTypeData()) {
ciReceiverTypeData* vdata = (ciReceiverTypeData*)pdata;
dump_replay_data_receiver_type_helper<ciReceiverTypeData>(out, round, count, vdata);
} else if (pdata->is_CallTypeData()) {
ciCallTypeData* call_type_data = (ciCallTypeData*)pdata;
dump_replay_data_call_type_helper<ciCallTypeData>(out, round, count, call_type_data);
}
}
if (parameters != NULL) {
for (int i = 0; i < parameters->number_of_parameters(); i++) {
dump_replay_data_type_helper(out, round, count, parameters, ParametersTypeData::type_offset(i), parameters->valid_parameter_type(i));
}
}
}
for (int count = 0, round = 0; round < 2; round++) {
if (round == 1) out->print(" methods %d", count);
dump_replay_data_extra_data_helper(out, round, count);
}
out->cr();
}
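The loops above all rely on the same two-round scheme described in the comment: round 0 only counts what will be emitted, round 1 prints the count followed by the entries. A stripped-down illustration (Entry and emit_oops are hypothetical, not part of the patch):
#include <cstdio>
#include <string>
#include <vector>
struct Entry {
  int offset;          // position of the cell inside the profile data
  std::string name;    // klass name to emit
  bool valid;          // only valid entries are counted and printed
};
static void emit_oops(const std::vector<Entry>& entries) {
  int count = 0;
  for (int round = 0; round < 2; round++) {
    if (round == 1) std::printf(" oops %d", count);   // header needs the final count
    for (const Entry& e : entries) {
      if (!e.valid) continue;
      if (round == 0) {
        count++;                                      // first pass: count only
      } else {
        std::printf(" %d %s", e.offset, e.name.c_str());
      }
    }
  }
  std::printf("\n");
}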
@ -586,6 +640,10 @@ void ciMethodData::print() {
void ciMethodData::print_data_on(outputStream* st) {
ResourceMark rm;
ciParametersTypeData* parameters = parameters_type_data();
if (parameters != NULL) {
parameters->print_data_on(st);
}
ciProfileData* data;
for (data = first_data(); is_valid(data); data = next_data(data)) {
st->print("%d", dp_to_di(data->dp()));
@ -607,6 +665,9 @@ void ciMethodData::print_data_on(outputStream* st) {
data = new ciArgInfoData(dp);
dp = end; // ArgInfoData is at the end of extra data section.
break;
case DataLayout::speculative_trap_data_tag:
data = new ciSpeculativeTrapData(dp);
break;
default:
fatal(err_msg("unexpected tag %d", dp->tag()));
}
@ -631,7 +692,7 @@ void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
}
void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
for (int i = 0; i < _number_of_entries; i++) {
for (int i = 0; i < number_of_entries(); i++) {
_pd->tab(st);
st->print("%d: stack (%u) ", i, stack_slot(i));
print_ciklass(st, type(i));
@ -650,12 +711,12 @@ void ciCallTypeData::print_data_on(outputStream* st, const char* extra) const {
print_shared(st, "ciCallTypeData", extra);
if (has_arguments()) {
tab(st, true);
st->print("argument types");
st->print_cr("argument types");
args()->print_data_on(st);
}
if (has_return()) {
tab(st, true);
st->print("return type");
st->print_cr("return type");
ret()->print_data_on(st);
}
}

View File

@ -45,7 +45,7 @@ class ciArgInfoData;
class ciCallTypeData;
class ciVirtualCallTypeData;
class ciParametersTypeData;
class ciSpeculativeTrapData;;
class ciSpeculativeTrapData;
typedef ProfileData ciProfileData;
@ -175,7 +175,7 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -202,7 +202,7 @@ public:
}
void translate_receiver_data_from(const ProfileData* data);
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
void print_receiver_data_on(outputStream* st) const;
#endif
};
@ -227,7 +227,7 @@ public:
rtd_super()->translate_receiver_data_from(data);
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -289,7 +289,7 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -338,7 +338,7 @@ public:
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -349,15 +349,15 @@ public:
virtual void translate_from(const ProfileData* data);
ciMethod* method() const {
return (ciMethod*)intptr_at(method_offset);
return (ciMethod*)intptr_at(speculative_trap_method);
}
void set_method(ciMethod* m) {
set_intptr_at(method_offset, (intptr_t)m);
set_intptr_at(speculative_trap_method, (intptr_t)m);
}
#ifndef PRODUCT
void print_data_on(outputStream* st, const char* extra) const;
void print_data_on(outputStream* st, const char* extra = NULL) const;
#endif
};
@ -406,8 +406,8 @@ private:
// Coherent snapshot of original header.
MethodData _orig;
// Dedicated area dedicated to parameters. Null if no parameter
// profiling for this method.
// Area dedicated to parameters. NULL if no parameter profiling for
// this method.
DataLayout* _parameters;
ciMethodData(MethodData* md);
@ -467,6 +467,11 @@ private:
void load_extra_data();
ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots);
void dump_replay_data_type_helper(outputStream* out, int round, int& count, ProfileData* pdata, ByteSize offset, ciKlass* k);
template<class T> void dump_replay_data_call_type_helper(outputStream* out, int round, int& count, T* call_type_data);
template<class T> void dump_replay_data_receiver_type_helper(outputStream* out, int round, int& count, T* call_type_data);
void dump_replay_data_extra_data_helper(outputStream* out, int round, int& count);
public:
bool is_method_data() const { return true; }

View File

@ -48,11 +48,14 @@ typedef struct _ciMethodDataRecord {
intptr_t* _data;
char* _orig_data;
jobject* _oops_handles;
int* _oops_offsets;
Klass** _classes;
Method** _methods;
int* _classes_offsets;
int* _methods_offsets;
int _data_length;
int _orig_data_length;
int _oops_length;
int _classes_length;
int _methods_length;
} ciMethodDataRecord;
typedef struct _ciMethodRecord {
@ -565,7 +568,7 @@ class CompileReplay : public StackObj {
rec->_instructions_size = parse_int("instructions_size");
}
// ciMethodData <klass> <name> <signature> <state> <current mileage> orig <length> # # ... data <length> # # ... oops <length>
// ciMethodData <klass> <name> <signature> <state> <current mileage> orig <length> # # ... data <length> # # ... oops <length> # ... methods <length>
void process_ciMethodData(TRAPS) {
Method* method = parse_method(CHECK);
if (had_error()) return;
@ -602,21 +605,34 @@ class CompileReplay : public StackObj {
if (rec->_data == NULL) {
return;
}
if (!parse_tag_and_count("oops", rec->_oops_length)) {
if (!parse_tag_and_count("oops", rec->_classes_length)) {
return;
}
rec->_oops_handles = NEW_RESOURCE_ARRAY(jobject, rec->_oops_length);
rec->_oops_offsets = NEW_RESOURCE_ARRAY(int, rec->_oops_length);
for (int i = 0; i < rec->_oops_length; i++) {
rec->_classes = NEW_RESOURCE_ARRAY(Klass*, rec->_classes_length);
rec->_classes_offsets = NEW_RESOURCE_ARRAY(int, rec->_classes_length);
for (int i = 0; i < rec->_classes_length; i++) {
int offset = parse_int("offset");
if (had_error()) {
return;
}
Klass* k = parse_klass(CHECK);
rec->_oops_offsets[i] = offset;
KlassHandle *kh = NEW_C_HEAP_OBJ(KlassHandle, mtCompiler);
::new ((void*)kh) KlassHandle(THREAD, k);
rec->_oops_handles[i] = (jobject)kh;
rec->_classes_offsets[i] = offset;
rec->_classes[i] = k;
}
if (!parse_tag_and_count("methods", rec->_methods_length)) {
return;
}
rec->_methods = NEW_RESOURCE_ARRAY(Method*, rec->_methods_length);
rec->_methods_offsets = NEW_RESOURCE_ARRAY(int, rec->_methods_length);
for (int i = 0; i < rec->_methods_length; i++) {
int offset = parse_int("offset");
if (had_error()) {
return;
}
Method* m = parse_method(CHECK);
rec->_methods_offsets[i] = offset;
rec->_methods[i] = m;
}
}
@ -1105,14 +1121,22 @@ void ciReplay::initialize(ciMethodData* m) {
m->_state = rec->_state;
m->_current_mileage = rec->_current_mileage;
if (rec->_data_length != 0) {
assert(m->_data_size == rec->_data_length * (int)sizeof(rec->_data[0]), "must agree");
assert(m->_data_size + m->_extra_data_size == rec->_data_length * (int)sizeof(rec->_data[0]) ||
m->_data_size == rec->_data_length * (int)sizeof(rec->_data[0]), "must agree");
// Write the correct ciObjects back into the profile data
ciEnv* env = ciEnv::current();
for (int i = 0; i < rec->_oops_length; i++) {
KlassHandle *h = (KlassHandle *)rec->_oops_handles[i];
*(ciMetadata**)(rec->_data + rec->_oops_offsets[i]) =
env->get_metadata((*h)());
for (int i = 0; i < rec->_classes_length; i++) {
Klass *k = rec->_classes[i];
// In case this class pointer is tagged, preserve the tag
// bits
rec->_data[rec->_classes_offsets[i]] =
ciTypeEntries::with_status(env->get_metadata(k)->as_klass(), rec->_data[rec->_classes_offsets[i]]);
}
for (int i = 0; i < rec->_methods_length; i++) {
Method *m = rec->_methods[i];
*(ciMetadata**)(rec->_data + rec->_methods_offsets[i]) =
env->get_metadata(m);
}
// Copy the updated profile data into place as intptr_ts
#ifdef _LP64

View File

@ -2805,7 +2805,7 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
"Short length on BootstrapMethods in class file %s",
CHECK);
guarantee_property(attribute_byte_length > sizeof(u2),
guarantee_property(attribute_byte_length >= sizeof(u2),
"Invalid BootstrapMethods attribute length %u in class file %s",
attribute_byte_length,
CHECK);

View File

@ -549,6 +549,8 @@ ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
bool ClassLoaderDataGraph::_should_purge = false;
// Add a new class loader data node to the list. Assign the newly created
// ClassLoaderData into the java/lang/ClassLoader object as a hidden field
ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
@ -675,32 +677,6 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
return array;
}
// For profiling and hsfind() only. Otherwise, this is unsafe (and slow). This
// is done lock free to avoid lock inversion problems. It is safe because
// new ClassLoaderData are added to the end of the CLDG, and only removed at
// safepoint. The _unloading list can be deallocated concurrently with CMS so
// this doesn't look in metaspace for classes that have been unloaded.
bool ClassLoaderDataGraph::contains(const void* x) {
if (DumpSharedSpaces) {
// There are only two metaspaces to worry about.
ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
return (ncld->ro_metaspace()->contains(x) || ncld->rw_metaspace()->contains(x));
}
if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(x)) {
return true;
}
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
if (cld->metaspace_or_null() != NULL && cld->metaspace_or_null()->contains(x)) {
return true;
}
}
// Do not check unloading list because deallocation can be concurrent.
return false;
}
#ifndef PRODUCT
bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
@ -759,6 +735,7 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
}
void ClassLoaderDataGraph::purge() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;

View File

@ -66,6 +66,7 @@ class ClassLoaderDataGraph : public AllStatic {
static ClassLoaderData* _unloading;
// CMS support.
static ClassLoaderData* _saved_head;
static bool _should_purge;
static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
static void post_class_unload_events(void);
@ -87,12 +88,20 @@ class ClassLoaderDataGraph : public AllStatic {
static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
static GrowableArray<ClassLoaderData*>* new_clds();
static void set_should_purge(bool b) { _should_purge = b; }
static void purge_if_needed() {
// Only purge the CLDG for CMS if concurrent sweep is complete.
if (_should_purge) {
purge();
// reset for next time.
set_should_purge(false);
}
}
static void dump_on(outputStream * const out) PRODUCT_RETURN;
static void dump() { dump_on(tty); }
static void verify();
// expensive test for pointer in metaspace for debugging
static bool contains(const void* x);
#ifndef PRODUCT
static bool contains_loader_data(ClassLoaderData* loader_data);
#endif

View File

@ -604,7 +604,6 @@ Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
Ticks class_load_start_time = Ticks::now();
// UseNewReflection
// Fix for 4474172; see evaluation for more details
class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
ClassLoaderData *loader_data = register_loader(class_loader, CHECK_NULL);
@ -898,7 +897,6 @@ Klass* SystemDictionary::find(Symbol* class_name,
Handle protection_domain,
TRAPS) {
// UseNewReflection
// The result of this call should be consistent with the result
// of the call to resolve_instance_class_or_null().
// See evaluation 6790209 and 4474172 for more details.

View File

@ -390,7 +390,7 @@ public:
return k;
}
static Klass* check_klass_Opt_Only_JDK14NewRef(Klass* k) {
assert(JDK_Version::is_gte_jdk14x_version() && UseNewReflection, "JDK 1.4 only");
assert(JDK_Version::is_gte_jdk14x_version(), "JDK 1.4 only");
// despite the optional loading, if you use this it must be present:
return check_klass(k);
}

View File

@ -211,9 +211,9 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool shou
// reflection implementation, not just those associated with
// sun/reflect/SerializationConstructorAccessor.
// NOTE: this is called too early in the bootstrapping process to be
// guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
// guarded by Universe::is_gte_jdk14x_version().
// Also for lambda generated code, gte jdk8
(!is_reflect || VerifyReflectionBytecodes));
(!is_reflect));
}
Symbol* Verifier::inference_verify(

View File

@ -1429,6 +1429,10 @@ Klass* Dependencies::check_unique_concrete_method(Klass* ctxk, Method* uniqm,
// Include m itself in the set, unless it is abstract.
// If this set has exactly one element, return that element.
Method* Dependencies::find_unique_concrete_method(Klass* ctxk, Method* m) {
// Return NULL if m is marked old; must have been a redefined method.
if (m->is_old()) {
return NULL;
}
ClassHierarchyWalker wf(m);
assert(wf.check_method_context(ctxk, m), "proper context");
wf.record_witnesses(1);

View File

@ -86,7 +86,7 @@ bool Disassembler::load_library() {
{
// Match "jvm[^/]*" in jvm_path.
const char* base = buf;
const char* p = strrchr(buf, '/');
const char* p = strrchr(buf, *os::file_separator());
if (p != NULL) lib_offset = p - base + 1;
p = strstr(p ? p : base, "jvm");
if (p != NULL) jvm_offset = p - base;
@ -111,7 +111,7 @@ bool Disassembler::load_library() {
if (_library == NULL) {
// 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
buf[lib_offset - 1] = '\0';
const char* p = strrchr(buf, '/');
const char* p = strrchr(buf, *os::file_separator());
if (p != NULL) {
lib_offset = p - buf + 1;
strcpy(&buf[lib_offset], hsdis_library_name);

View File

@ -33,6 +33,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"

View File

@ -6362,7 +6362,9 @@ void CMSCollector::sweep(bool asynch) {
verify_overflow_empty();
if (should_unload_classes()) {
ClassLoaderDataGraph::purge();
// Delay purge to the beginning of the next safepoint. Metaspace::contains
// requires that the virtual spaces are stable and not deleted.
ClassLoaderDataGraph::set_should_purge(true);
}
_intra_sweep_timer.stop();
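This is the producer side of the purge_if_needed() helper added to ClassLoaderDataGraph above. A hedged sketch of how the two halves fit together; the function names below are placeholders, not the actual call sites in this patch:
// Runs concurrently at the end of a CMS sweep: only record the request.
void concurrent_sweep_finished() {
  ClassLoaderDataGraph::set_should_purge(true);
}
// Runs later, inside a safepoint, where the virtual spaces are stable and
// Metaspace::contains() can be used safely.
void safepoint_cleanup() {
  ClassLoaderDataGraph::purge_if_needed();   // purges and clears the flag
}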

View File

@ -45,6 +45,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "services/memTracker.hpp"
// Concurrent marking bit map wrapper
@ -819,7 +820,7 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurren
// false before we start remark. At this point we should also be
// in a STW phase.
assert(!concurrent_marking_in_progress(), "invariant");
assert(_finger == _heap_end,
assert(out_of_regions(),
err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
p2i(_finger), p2i(_heap_end)));
update_g1_committed(true);
@ -978,7 +979,9 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
if (concurrent()) {
SuspendibleThreadSet::leave();
}
_first_overflow_barrier_sync.enter();
bool barrier_aborted = !_first_overflow_barrier_sync.enter();
if (concurrent()) {
SuspendibleThreadSet::join();
}
@ -986,7 +989,17 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
// more work
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
if (barrier_aborted) {
gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
} else {
gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
}
}
if (barrier_aborted) {
// If the barrier aborted we ignore the overflow condition and
// just abort the whole marking phase as quickly as possible.
return;
}
// If we're executing the concurrent phase of marking, reset the marking
@ -1026,14 +1039,20 @@ void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
if (concurrent()) {
SuspendibleThreadSet::leave();
}
_second_overflow_barrier_sync.enter();
bool barrier_aborted = !_second_overflow_barrier_sync.enter();
if (concurrent()) {
SuspendibleThreadSet::join();
}
// at this point everything should be re-initialized and ready to go
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
if (barrier_aborted) {
gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
} else {
gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
}
}
}
@ -3240,6 +3259,8 @@ void ConcurrentMark::abort() {
for (uint i = 0; i < _max_worker_id; ++i) {
_tasks[i]->clear_region_fields();
}
_first_overflow_barrier_sync.abort();
_second_overflow_barrier_sync.abort();
_has_aborted = true;
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

View File

@ -542,8 +542,12 @@ protected:
// frequently.
HeapRegion* claim_region(uint worker_id);
// It determines whether we've run out of regions to scan
bool out_of_regions() { return _finger == _heap_end; }
// It determines whether we've run out of regions to scan. Note that
// the finger can point past the heap end in case the heap was expanded
// to satisfy an allocation without doing a GC. This is fine, because all
// objects in those regions will be considered live anyway because of
// SATB guarantees (i.e. their TAMS will be equal to bottom).
bool out_of_regions() { return _finger >= _heap_end; }
// Returns the task with the given id
CMTask* task(int id) {

View File

@ -62,6 +62,7 @@
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
@ -5401,7 +5402,7 @@ public:
if (_g1h->is_in_g1_reserved(p)) {
_par_scan_state->push_on_queue(p);
} else {
assert(!ClassLoaderDataGraph::contains((address)p),
assert(!Metaspace::contains((const void*)p),
err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) "
PTR_FORMAT, p));
_copy_non_heap_obj_cl->do_oop(p);

View File

@ -31,6 +31,7 @@
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "runtime/prefetch.inline.hpp"
/*
* This really ought to be an inline function, but apparently the C++

View File

@ -96,7 +96,15 @@ void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
memset(first, g1_young_gen, last - first);
// Below we may use an explicit loop instead of memset() because on
// certain platforms memset() can give concurrent readers phantom zeros.
if (UseMemSetInBOT) {
memset(first, g1_young_gen, last - first);
} else {
for (jbyte* i = first; i < last; i++) {
*i = g1_young_gen;
}
}
}
#ifndef PRODUCT

View File

@ -32,6 +32,7 @@
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.inline.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -194,23 +194,16 @@ bool RSHashTable::add_card(RegionIdx_t region_ind, CardIdx_t card_index) {
}
bool RSHashTable::get_cards(RegionIdx_t region_ind, CardIdx_t* cards) {
int ind = (int) (region_ind & capacity_mask());
int cur_ind = _buckets[ind];
SparsePRTEntry* cur;
while (cur_ind != NullEntry &&
(cur = entry(cur_ind))->r_ind() != region_ind) {
cur_ind = cur->next_index();
SparsePRTEntry* entry = get_entry(region_ind);
if (entry == NULL) {
return false;
}
if (cur_ind == NullEntry) return false;
// Otherwise...
assert(cur->r_ind() == region_ind, "Postcondition of loop + test above.");
assert(cur->num_valid_cards() > 0, "Inv");
cur->copy_cards(cards);
entry->copy_cards(cards);
return true;
}
SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) {
SparsePRTEntry* RSHashTable::get_entry(RegionIdx_t region_ind) const {
int ind = (int) (region_ind & capacity_mask());
int cur_ind = _buckets[ind];
SparsePRTEntry* cur;
@ -246,28 +239,9 @@ bool RSHashTable::delete_entry(RegionIdx_t region_ind) {
return true;
}
SparsePRTEntry*
RSHashTable::entry_for_region_ind(RegionIdx_t region_ind) const {
assert(occupied_entries() < capacity(), "Precondition");
int ind = (int) (region_ind & capacity_mask());
int cur_ind = _buckets[ind];
SparsePRTEntry* cur;
while (cur_ind != NullEntry &&
(cur = entry(cur_ind))->r_ind() != region_ind) {
cur_ind = cur->next_index();
}
if (cur_ind != NullEntry) {
assert(cur->r_ind() == region_ind, "Loop postcondition + test");
return cur;
} else {
return NULL;
}
}
SparsePRTEntry*
RSHashTable::entry_for_region_ind_create(RegionIdx_t region_ind) {
SparsePRTEntry* res = entry_for_region_ind(region_ind);
SparsePRTEntry* res = get_entry(region_ind);
if (res == NULL) {
int new_ind = alloc_entry();
assert(0 <= new_ind && (size_t)new_ind < capacity(), "There should be room.");
@ -365,7 +339,7 @@ bool RSHashTableIter::has_next(size_t& card_index) {
}
bool RSHashTable::contains_card(RegionIdx_t region_index, CardIdx_t card_index) const {
SparsePRTEntry* e = entry_for_region_ind(region_index);
SparsePRTEntry* e = get_entry(region_index);
return (e != NULL && e->contains_card(card_index));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -119,12 +119,6 @@ class RSHashTable : public CHeapObj<mtGC> {
int _free_region;
int _free_list;
// Requires that the caller hold a lock preventing parallel modifying
// operations, and that the table be less than completely full. If
// an entry for "region_ind" is already in the table, finds it and
// returns its address; otherwise returns "NULL."
SparsePRTEntry* entry_for_region_ind(RegionIdx_t region_ind) const;
// Requires that the caller hold a lock preventing parallel modifying
// operations, and that the table be less than completely full. If
// an entry for "region_ind" is already in the table, finds it and
@ -158,7 +152,7 @@ public:
void add_entry(SparsePRTEntry* e);
SparsePRTEntry* get_entry(RegionIdx_t region_id);
SparsePRTEntry* get_entry(RegionIdx_t region_id) const;
void clear();

View File

@ -30,6 +30,7 @@
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
#include "runtime/prefetch.inline.hpp"
// Checks an individual oop for missing precise marks. Mark
// may be either dirty or newgen.

View File

@ -32,6 +32,7 @@
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;

View File

@ -950,7 +950,6 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
// reflection implementation, not just those associated with
// sun/reflect/SerializationConstructorAccessor.
bool is_reflect = JDK_Version::is_gte_jdk14x_version() &&
UseNewReflection &&
klass_to_check->is_subclass_of(
SystemDictionary::reflect_MagicAccessorImpl_klass());

View File

@ -75,7 +75,7 @@ bool MetaspaceObj::is_shared() const {
}
bool MetaspaceObj::is_metaspace_object() const {
return ClassLoaderDataGraph::contains((void*)this);
return Metaspace::contains((void*)this);
}
void MetaspaceObj::print_address_on(outputStream* st) const {

View File

@ -42,6 +42,7 @@
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

View File

@ -316,6 +316,8 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }
size_t reserved_words() const { return _virtual_space.reserved_size() / BytesPerWord; }
size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
@ -557,6 +559,8 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
void inc_virtual_space_count();
void dec_virtual_space_count();
bool contains(const void* ptr);
// Unlink empty VirtualSpaceNodes and free them.
void purge(ChunkManager* chunk_manager);
@ -641,8 +645,6 @@ class SpaceManager : public CHeapObj<mtClass> {
// Accessors
Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
_chunks_in_use[index] = v;
}
@ -757,8 +759,6 @@ class SpaceManager : public CHeapObj<mtClass> {
void print_on(outputStream* st) const;
void locked_print_chunks_in_use_on(outputStream* st) const;
bool contains(const void *ptr);
void verify();
void verify_chunk_size(Metachunk* chunk);
NOT_PRODUCT(void mangle_freed_chunks();)
@ -1078,6 +1078,7 @@ void ChunkManager::remove_chunk(Metachunk* chunk) {
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
assert_lock_strong(SpaceManager::expand_lock());
// Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe.
@ -1111,8 +1112,8 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
}
#ifdef ASSERT
if (purged_vsl != NULL) {
// List should be stable enough to use an iterator here.
VirtualSpaceListIterator iter(virtual_space_list());
// List should be stable enough to use an iterator here.
VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsl = iter.get_next();
assert(vsl != purged_vsl, "Purge of vsl failed");
@ -1121,6 +1122,23 @@ void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
#endif
}
// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
// List should be stable enough to use an iterator here because removing virtual
// space nodes is only allowed at a safepoint.
VirtualSpaceListIterator iter(virtual_space_list());
while (iter.repeat()) {
VirtualSpaceNode* vsn = iter.get_next();
if (vsn->contains(ptr)) {
return true;
}
}
return false;
}
void VirtualSpaceList::retire_current_virtual_space() {
assert_lock_strong(SpaceManager::expand_lock());
@ -1210,6 +1228,8 @@ bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
} else {
assert(new_entry->reserved_words() == vs_word_size,
"Reserved memory size differs from requested memory size");
// ensure lock-free iteration sees fully initialized node
OrderAccess::storestore();
link_vs(new_entry);
return true;
}
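The storestore() before link_vs() is what lets VirtualSpaceList::contains() iterate without a lock. A simplified sketch of that publication pattern, written with C++11 atomics instead of OrderAccess (Node, publish and contains_ptr below are illustrations, not HotSpot code):
#include <atomic>
struct Node {
  const void* lo;
  const void* hi;
  Node* next;
};
std::atomic<Node*> head{nullptr};
// Writer (holds the expand lock): fully initialize the node, then publish it.
void publish(Node* n) {
  n->next = head.load(std::memory_order_relaxed);
  head.store(n, std::memory_order_release);       // release plays the role of storestore before the link
}
// Lock-free reader: nodes are only appended (removal happens at a safepoint),
// so an acquire load of the head sees fully initialized nodes.
bool contains_ptr(const void* p) {
  for (Node* n = head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
    if (p >= n->lo && p < n->hi) return true;
  }
  return false;
}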
@ -2434,21 +2454,6 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
return result;
}
// This function looks at the chunks in the metaspace without locking.
// The chunks are added with store ordering and not deleted except for at
// unloading time.
bool SpaceManager::contains(const void *ptr) {
for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
{
Metachunk* curr = chunks_in_use(i);
while (curr != NULL) {
if (curr->contains(ptr)) return true;
curr = curr->next();
}
}
return false;
}
void SpaceManager::verify() {
// If there are blocks in the dictionary, then
// verification of chunks does not work since
@ -3538,11 +3543,15 @@ void Metaspace::print_on(outputStream* out) const {
}
bool Metaspace::contains(const void* ptr) {
if (vsm()->contains(ptr)) return true;
if (using_class_space()) {
return class_vsm()->contains(ptr);
if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) {
return true;
}
return false;
if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
return true;
}
return get_space_list(NonClassType)->contains(ptr);
}
void Metaspace::verify() {
@ -3787,5 +3796,4 @@ void TestVirtualSpaceNode_test() {
TestVirtualSpaceNodeTest::test();
TestVirtualSpaceNodeTest::test_is_available();
}
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -232,7 +232,8 @@ class Metaspace : public CHeapObj<mtClass> {
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
bool contains(const void* ptr);
static bool contains(const void* ptr);
void dump(outputStream* const out) const;
// Free empty virtualspaces

View File

@ -37,6 +37,7 @@
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"

View File

@ -33,24 +33,8 @@
#include "memory/watermark.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/macros.hpp"
#include "utilities/workgroup.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif
// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
@ -468,272 +452,6 @@ protected:
size_t word_len);
};
#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
/* Compute the new addresses for the live objects and store it in the mark \
* Used by universe::mark_sweep_phase2() \
*/ \
HeapWord* compact_top; /* This is where we are currently compacting to. */ \
\
/* We're sure to be here before any objects are compacted into this \
* space, so this is a good time to initialize this: \
*/ \
set_compaction_top(bottom()); \
\
if (cp->space == NULL) { \
assert(cp->gen != NULL, "need a generation"); \
assert(cp->threshold == NULL, "just checking"); \
assert(cp->gen->first_compaction_space() == this, "just checking"); \
cp->space = cp->gen->first_compaction_space(); \
compact_top = cp->space->bottom(); \
cp->space->set_compaction_top(compact_top); \
cp->threshold = cp->space->initialize_threshold(); \
} else { \
compact_top = cp->space->compaction_top(); \
} \
\
/* We allow some amount of garbage towards the bottom of the space, so \
* we don't start compacting before there is a significant gain to be made.\
* Occasionally, we want to ensure a full compaction, which is determined \
* by the MarkSweepAlwaysCompactCount parameter. \
*/ \
uint invocations = MarkSweep::total_invocations(); \
bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
size_t allowed_deadspace = 0; \
if (skip_dead) { \
const size_t ratio = allowed_dead_ratio(); \
allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
} \
\
HeapWord* q = bottom(); \
HeapWord* t = scan_limit(); \
\
HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
live object. */ \
HeapWord* first_dead = end();/* The first dead object. */ \
LiveRange* liveRange = NULL; /* The current live range, recorded in the \
first header of preceding free area. */ \
_first_dead = first_dead; \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
while (q < t) { \
assert(!block_is_obj(q) || \
oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
oop(q)->mark()->has_bias_pattern(), \
"these are the only valid states during a mark sweep"); \
if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
size_t size = block_size(q); \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
q += size; \
end_of_live = q; \
} else { \
/* run over all the contiguous dead objects */ \
HeapWord* end = q; \
do { \
/* prefetch beyond end */ \
Prefetch::write(end, interval); \
end += block_size(end); \
} while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
\
/* see if we might want to pretend this object is alive so that \
* we don't have to compact quite as often. \
*/ \
if (allowed_deadspace > 0 && q == compact_top) { \
size_t sz = pointer_delta(end, q); \
if (insert_deadspace(allowed_deadspace, q, sz)) { \
compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
q = end; \
end_of_live = end; \
continue; \
} \
} \
\
/* otherwise, it really is a free region. */ \
\
/* for the previous LiveRange, record the end of the live objects. */ \
if (liveRange) { \
liveRange->set_end(q); \
} \
\
/* record the current LiveRange object. \
* liveRange->start() is overlaid on the mark word. \
*/ \
liveRange = (LiveRange*)q; \
liveRange->set_start(end); \
liveRange->set_end(end); \
\
/* see if this is the first dead region. */ \
if (q < first_dead) { \
first_dead = q; \
} \
\
/* move on to the next object */ \
q = end; \
} \
} \
\
assert(q == t, "just checking"); \
if (liveRange != NULL) { \
liveRange->set_end(q); \
} \
_end_of_live = end_of_live; \
if (end_of_live < first_dead) { \
first_dead = end_of_live; \
} \
_first_dead = first_dead; \
\
/* save the compaction_top of the compaction space. */ \
cp->space->set_compaction_top(compact_top); \
}
#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
/* adjust all the interior pointers to point at the new locations of objects \
* Used by MarkSweep::mark_sweep_phase3() */ \
\
HeapWord* q = bottom(); \
HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
\
assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
\
if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
/* we have a chunk of the space which hasn't moved and we've \
* reinitialized the mark word during the previous pass, so we can't \
* use is_gc_marked for the traversal. */ \
HeapWord* end = _first_dead; \
\
while (q < end) { \
/* I originally tried to conjoin "block_start(q) == q" to the \
* assertion below, but that doesn't work, because you can't \
* accurately traverse previous objects to get to the current one \
* after their pointers have been \
* updated, until the actual compaction is done. dld, 4/00 */ \
assert(block_is_obj(q), \
"should be at block boundaries, and should be looking at objs"); \
\
/* point all the oops to the new location */ \
size_t size = oop(q)->adjust_pointers(); \
size = adjust_obj_size(size); \
\
q += size; \
} \
\
if (_first_dead == t) { \
q = t; \
} else { \
/* $$$ This is funky. Using this to read the previously written \
* LiveRange. See also use below. */ \
q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
} \
} \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
debug_only(HeapWord* prev_q = NULL); \
while (q < t) { \
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
if (oop(q)->is_gc_marked()) { \
/* q is alive */ \
/* point all the oops to the new location */ \
size_t size = oop(q)->adjust_pointers(); \
size = adjust_obj_size(size); \
debug_only(prev_q = q); \
q += size; \
} else { \
/* q is not a live object, so its mark should point at the next \
* live object */ \
debug_only(prev_q = q); \
q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
assert(q > prev_q, "we should be moving forward through memory"); \
} \
} \
\
assert(q == t, "just checking"); \
}
#define SCAN_AND_COMPACT(obj_size) { \
/* Copy all live objects to their new location \
* Used by MarkSweep::mark_sweep_phase4() */ \
\
HeapWord* q = bottom(); \
HeapWord* const t = _end_of_live; \
debug_only(HeapWord* prev_q = NULL); \
\
if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
debug_only( \
/* we have a chunk of the space which hasn't moved and we've reinitialized \
* the mark word during the previous pass, so we can't use is_gc_marked for \
* the traversal. */ \
HeapWord* const end = _first_dead; \
\
while (q < end) { \
size_t size = obj_size(q); \
assert(!oop(q)->is_gc_marked(), \
"should be unmarked (special dense prefix handling)"); \
debug_only(prev_q = q); \
q += size; \
} \
) /* debug_only */ \
\
if (_first_dead == t) { \
q = t; \
} else { \
/* $$$ Funky */ \
q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
} \
} \
\
const intx scan_interval = PrefetchScanIntervalInBytes; \
const intx copy_interval = PrefetchCopyIntervalInBytes; \
while (q < t) { \
if (!oop(q)->is_gc_marked()) { \
/* mark is pointer to next marked oop */ \
debug_only(prev_q = q); \
q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
assert(q > prev_q, "we should be moving forward through memory"); \
} else { \
/* prefetch beyond q */ \
Prefetch::read(q, scan_interval); \
\
/* size and destination */ \
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
/* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
/* copy object and reinit its mark */ \
assert(q != compaction_top, "everything in this pass should be moving"); \
Copy::aligned_conjoint_words(q, compaction_top, size); \
oop(compaction_top)->init_mark(); \
assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
\
debug_only(prev_q = q); \
q += size; \
} \
} \
\
/* Let's remember if we were empty before we did the compaction. */ \
bool was_empty = used_region().is_empty(); \
/* Reset space after compaction is complete */ \
reset_after_compaction(); \
/* We do this clear, below, since it has overloaded meanings for some */ \
/* space subtypes. For example, OffsetTableContigSpace's that were */ \
/* compacted into will have had their offset table thresholds updated */ \
/* continuously, but those that weren't need to have their thresholds */ \
/* re-initialized. Also mangles unused area for debugging. */ \
if (used_region().is_empty()) { \
if (!was_empty) clear(SpaceDecorator::Mangle); \
} else { \
if (ZapUnusedHeapArea) mangle_unused_area(); \
} \
}
class GenSpaceMangler;
// A space in which the free area is contiguous. It therefore supports

View File

@ -28,12 +28,279 @@
#include "gc_interface/collectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
inline HeapWord* Space::block_start(const void* p) {
return block_start_const(p);
}
#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
/* Compute the new addresses for the live objects and store it in the mark \
* Used by universe::mark_sweep_phase2() \
*/ \
HeapWord* compact_top; /* This is where we are currently compacting to. */ \
\
/* We're sure to be here before any objects are compacted into this \
* space, so this is a good time to initialize this: \
*/ \
set_compaction_top(bottom()); \
\
if (cp->space == NULL) { \
assert(cp->gen != NULL, "need a generation"); \
assert(cp->threshold == NULL, "just checking"); \
assert(cp->gen->first_compaction_space() == this, "just checking"); \
cp->space = cp->gen->first_compaction_space(); \
compact_top = cp->space->bottom(); \
cp->space->set_compaction_top(compact_top); \
cp->threshold = cp->space->initialize_threshold(); \
} else { \
compact_top = cp->space->compaction_top(); \
} \
\
/* We allow some amount of garbage towards the bottom of the space, so \
* we don't start compacting before there is a significant gain to be made.\
* Occasionally, we want to ensure a full compaction, which is determined \
* by the MarkSweepAlwaysCompactCount parameter. \
*/ \
uint invocations = MarkSweep::total_invocations(); \
bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
\
size_t allowed_deadspace = 0; \
if (skip_dead) { \
const size_t ratio = allowed_dead_ratio(); \
allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
} \
\
HeapWord* q = bottom(); \
HeapWord* t = scan_limit(); \
\
HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \
live object. */ \
HeapWord* first_dead = end();/* The first dead object. */ \
LiveRange* liveRange = NULL; /* The current live range, recorded in the \
first header of preceding free area. */ \
_first_dead = first_dead; \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
while (q < t) { \
assert(!block_is_obj(q) || \
oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
oop(q)->mark()->has_bias_pattern(), \
"these are the only valid states during a mark sweep"); \
if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
size_t size = block_size(q); \
compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
q += size; \
end_of_live = q; \
} else { \
/* run over all the contiguous dead objects */ \
HeapWord* end = q; \
do { \
/* prefetch beyond end */ \
Prefetch::write(end, interval); \
end += block_size(end); \
} while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
\
/* see if we might want to pretend this object is alive so that \
* we don't have to compact quite as often. \
*/ \
if (allowed_deadspace > 0 && q == compact_top) { \
size_t sz = pointer_delta(end, q); \
if (insert_deadspace(allowed_deadspace, q, sz)) { \
compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
q = end; \
end_of_live = end; \
continue; \
} \
} \
\
/* otherwise, it really is a free region. */ \
\
/* for the previous LiveRange, record the end of the live objects. */ \
if (liveRange) { \
liveRange->set_end(q); \
} \
\
/* record the current LiveRange object. \
* liveRange->start() is overlaid on the mark word. \
*/ \
liveRange = (LiveRange*)q; \
liveRange->set_start(end); \
liveRange->set_end(end); \
\
/* see if this is the first dead region. */ \
if (q < first_dead) { \
first_dead = q; \
} \
\
/* move on to the next object */ \
q = end; \
} \
} \
\
assert(q == t, "just checking"); \
if (liveRange != NULL) { \
liveRange->set_end(q); \
} \
_end_of_live = end_of_live; \
if (end_of_live < first_dead) { \
first_dead = end_of_live; \
} \
_first_dead = first_dead; \
\
/* save the compaction_top of the compaction space. */ \
cp->space->set_compaction_top(compact_top); \
}
#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
/* adjust all the interior pointers to point at the new locations of objects \
* Used by MarkSweep::mark_sweep_phase3() */ \
\
HeapWord* q = bottom(); \
HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
\
assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
\
if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
/* we have a chunk of the space which hasn't moved and we've \
* reinitialized the mark word during the previous pass, so we can't \
* use is_gc_marked for the traversal. */ \
HeapWord* end = _first_dead; \
\
while (q < end) { \
/* I originally tried to conjoin "block_start(q) == q" to the \
* assertion below, but that doesn't work, because you can't \
* accurately traverse previous objects to get to the current one \
* after their pointers have been \
* updated, until the actual compaction is done. dld, 4/00 */ \
assert(block_is_obj(q), \
"should be at block boundaries, and should be looking at objs"); \
\
/* point all the oops to the new location */ \
size_t size = oop(q)->adjust_pointers(); \
size = adjust_obj_size(size); \
\
q += size; \
} \
\
if (_first_dead == t) { \
q = t; \
} else { \
/* $$$ This is funky. Using this to read the previously written \
* LiveRange. See also use below. */ \
q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
} \
} \
\
const intx interval = PrefetchScanIntervalInBytes; \
\
debug_only(HeapWord* prev_q = NULL); \
while (q < t) { \
/* prefetch beyond q */ \
Prefetch::write(q, interval); \
if (oop(q)->is_gc_marked()) { \
/* q is alive */ \
/* point all the oops to the new location */ \
size_t size = oop(q)->adjust_pointers(); \
size = adjust_obj_size(size); \
debug_only(prev_q = q); \
q += size; \
} else { \
/* q is not a live object, so its mark should point at the next \
* live object */ \
debug_only(prev_q = q); \
q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
assert(q > prev_q, "we should be moving forward through memory"); \
} \
} \
\
assert(q == t, "just checking"); \
}
#define SCAN_AND_COMPACT(obj_size) { \
/* Copy all live objects to their new location \
* Used by MarkSweep::mark_sweep_phase4() */ \
\
HeapWord* q = bottom(); \
HeapWord* const t = _end_of_live; \
debug_only(HeapWord* prev_q = NULL); \
\
if (q < t && _first_dead > q && \
!oop(q)->is_gc_marked()) { \
debug_only( \
/* we have a chunk of the space which hasn't moved and we've reinitialized \
* the mark word during the previous pass, so we can't use is_gc_marked for \
* the traversal. */ \
HeapWord* const end = _first_dead; \
\
while (q < end) { \
size_t size = obj_size(q); \
assert(!oop(q)->is_gc_marked(), \
"should be unmarked (special dense prefix handling)"); \
debug_only(prev_q = q); \
q += size; \
} \
) /* debug_only */ \
\
if (_first_dead == t) { \
q = t; \
} else { \
/* $$$ Funky */ \
q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
} \
} \
\
const intx scan_interval = PrefetchScanIntervalInBytes; \
const intx copy_interval = PrefetchCopyIntervalInBytes; \
while (q < t) { \
if (!oop(q)->is_gc_marked()) { \
/* mark is pointer to next marked oop */ \
debug_only(prev_q = q); \
q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
assert(q > prev_q, "we should be moving forward through memory"); \
} else { \
/* prefetch beyond q */ \
Prefetch::read(q, scan_interval); \
\
/* size and destination */ \
size_t size = obj_size(q); \
HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
\
/* prefetch beyond compaction_top */ \
Prefetch::write(compaction_top, copy_interval); \
\
/* copy object and reinit its mark */ \
assert(q != compaction_top, "everything in this pass should be moving"); \
Copy::aligned_conjoint_words(q, compaction_top, size); \
oop(compaction_top)->init_mark(); \
assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
\
debug_only(prev_q = q); \
q += size; \
} \
} \
\
/* Let's remember if we were empty before we did the compaction. */ \
bool was_empty = used_region().is_empty(); \
/* Reset space after compaction is complete */ \
reset_after_compaction(); \
/* We do this clear, below, since it has overloaded meanings for some */ \
/* space subtypes. For example, OffsetTableContigSpace's that were */ \
/* compacted into will have had their offset table thresholds updated */ \
/* continuously, but those that weren't need to have their thresholds */ \
/* re-initialized. Also mangles unused area for debugging. */ \
if (used_region().is_empty()) { \
if (!was_empty) clear(SpaceDecorator::Mangle); \
} else { \
if (ZapUnusedHeapArea) mangle_unused_area(); \
} \
}
inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
HeapWord* res = ContiguousSpace::allocate(size);
if (res != NULL) {

View File

@ -498,9 +498,10 @@ bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
// _f1 == NULL || !_f1->is_method() are OK here
return true;
}
// return false if _f1 refers to an old or an obsolete method
// return false if _f1 refers to a non-deleted old or obsolete method
return (NOT_PRODUCT(_f1->is_valid() &&) _f1->is_method() &&
!((Method*)_f1)->is_old() && !((Method*)_f1)->is_obsolete());
(f1_as_method()->is_deleted() ||
(!f1_as_method()->is_old() && !f1_as_method()->is_obsolete())));
}
bool ConstantPoolCacheEntry::is_interesting_method_entry(Klass* k) {

View File

@ -640,7 +640,7 @@ void Klass::verify_on(outputStream* st) {
// This can be expensive, but it is worth checking that this klass is actually
// in the CLD graph but not in production.
assert(ClassLoaderDataGraph::contains((address)this), "Should be");
assert(Metaspace::contains((address)this), "Should be");
guarantee(this->is_klass(),"should be klass");

View File

@ -555,36 +555,6 @@ class Klass : public Metadata {
static void clean_weak_klass_links(BoolObjectClosure* is_alive);
// Prefetch within oop iterators. This is a macro because we
// can't guarantee that the compiler will inline it. In 64-bit
// it generally doesn't. Signature is
//
// static void prefetch_beyond(oop* const start,
// oop* const end,
// const intx foffset,
// const Prefetch::style pstyle);
#define prefetch_beyond(start, end, foffset, pstyle) { \
const intx foffset_ = (foffset); \
const Prefetch::style pstyle_ = (pstyle); \
assert(foffset_ > 0, "prefetch beyond, not behind"); \
if (pstyle_ != Prefetch::do_none) { \
oop* ref = (start); \
if (ref < (end)) { \
switch (pstyle_) { \
case Prefetch::do_read: \
Prefetch::read(*ref, foffset_); \
break; \
case Prefetch::do_write: \
Prefetch::write(*ref, foffset_); \
break; \
default: \
ShouldNotReachHere(); \
break; \
} \
} \
} \
}
// iterators
virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
virtual int oop_oop_iterate_v(oop obj, ExtendedOopClosure* blk) {

View File

@ -1019,13 +1019,11 @@ bool Method::should_not_be_cached() const {
* security related stack walks (like Reflection.getCallerClass).
*/
bool Method::is_ignored_by_security_stack_walk() const {
const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
if (intrinsic_id() == vmIntrinsics::_invoke) {
// This is Method.invoke() -- ignore it
return true;
}
if (use_new_reflection &&
if (JDK_Version::is_gte_jdk14x_version() &&
method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
// This is an auxiliary frame -- ignore it
return true;
@ -1868,6 +1866,14 @@ void Method::clear_jmethod_ids(ClassLoaderData* loader_data) {
loader_data->jmethod_ids()->clear_all_methods();
}
bool Method::has_method_vptr(const void* ptr) {
Method m;
// This assumes that the vtbl pointer is the first word of a C++ object.
// This assumption is also in universe.cpp patch_klass_vtble
void* vtbl2 = dereference_vptr((const void*)&m);
void* this_vtbl = dereference_vptr(ptr);
return vtbl2 == this_vtbl;
}
// Check that this pointer is valid by checking that the vtbl pointer matches
bool Method::is_valid_method() const {
@ -1876,12 +1882,7 @@ bool Method::is_valid_method() const {
} else if (!is_metaspace_object()) {
return false;
} else {
Method m;
// This assumes that the vtbl pointer is the first word of a C++ object.
// This assumption is also in universe.cpp patch_klass_vtble
void* vtbl2 = dereference_vptr((void*)&m);
void* this_vtbl = dereference_vptr((void*)this);
return vtbl2 == this_vtbl;
return has_method_vptr((const void*)this);
}
}
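has_method_vptr relies on the common C++ object layout in which a polymorphic object's first word is its vtable pointer: build a throwaway Method on the stack and compare first words. The same trick, sketched standalone with a hypothetical class hierarchy (not HotSpot types):

#include <cstdio>

struct Base    { virtual ~Base() {} virtual int id() const { return 0; } };
struct Derived : Base { int id() const override { return 1; } };

// Assumes the vtable pointer is the first word of a polymorphic object.
static void* dereference_vptr(const void* addr) { return *(void* const*)addr; }

// "Does ptr plausibly point at a Derived?" -- compare against a stack probe.
static bool has_derived_vptr(const void* ptr) {
  Derived probe;
  return dereference_vptr(&probe) == dereference_vptr(ptr);
}

int main() {
  Derived d; Base b;
  std::printf("Derived: %d  Base: %d\n", has_derived_vptr(&d), has_derived_vptr(&b));
  return 0;
}

This is only a heuristic, which is why the VM still guards it with the metaspace check shown above.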

View File

@ -684,6 +684,8 @@ class Method : public Metadata {
void set_is_old() { _access_flags.set_is_old(); }
bool is_obsolete() const { return access_flags().is_obsolete(); }
void set_is_obsolete() { _access_flags.set_is_obsolete(); }
bool is_deleted() const { return access_flags().is_deleted(); }
void set_is_deleted() { _access_flags.set_is_deleted(); }
bool on_stack() const { return access_flags().on_stack(); }
void set_on_stack(const bool value);
@ -876,6 +878,7 @@ class Method : public Metadata {
const char* internal_name() const { return "{method}"; }
// Check for valid method pointer
static bool has_method_vptr(const void* ptr);
bool is_valid_method() const;
// Verify

View File

@ -851,11 +851,10 @@ private:
return _base_off + stack_slot_local_offset(i);
}
protected:
const int _number_of_entries;
// offset of cell for type for entry i within ProfileData object
int type_offset(int i) const {
int type_offset_in_cells(int i) const {
return _base_off + type_local_offset(i);
}
@ -868,6 +867,8 @@ public:
void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
int number_of_entries() const { return _number_of_entries; }
// offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
static int stack_slot_local_offset(int i) {
return i * per_arg_cell_count + stack_slot_entry;
@ -893,13 +894,13 @@ public:
// type for entry i
intptr_t type(int i) const {
assert(i >= 0 && i < _number_of_entries, "oob");
return _pd->intptr_at(type_offset(i));
return _pd->intptr_at(type_offset_in_cells(i));
}
// set type for entry i
void set_type(int i, intptr_t k) {
assert(i >= 0 && i < _number_of_entries, "oob");
_pd->set_intptr_at(type_offset(i), k);
_pd->set_intptr_at(type_offset_in_cells(i), k);
}
static ByteSize per_arg_size() {
@ -907,7 +908,11 @@ public:
}
static int per_arg_count() {
return per_arg_cell_count ;
return per_arg_cell_count;
}
ByteSize type_offset(int i) const {
return DataLayout::cell_offset(type_offset_in_cells(i));
}
// GC support
@ -973,7 +978,7 @@ private:
}
static int argument_type_local_offset(int i) {
return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);;
return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
}
public:
@ -1129,6 +1134,14 @@ public:
return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
}
ByteSize argument_type_offset(int i) {
return _args.type_offset(i);
}
ByteSize return_type_offset() {
return _ret.type_offset();
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
if (has_arguments()) {
@ -1436,6 +1449,14 @@ public:
return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
}
ByteSize argument_type_offset(int i) {
return _args.type_offset(i);
}
ByteSize return_type_offset() {
return _ret.type_offset();
}
// GC support
virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
@ -1926,7 +1947,7 @@ public:
class SpeculativeTrapData : public ProfileData {
protected:
enum {
method_offset,
speculative_trap_method,
speculative_trap_cell_count
};
public:
@ -1946,11 +1967,15 @@ public:
// Direct accessor
Method* method() const {
return (Method*)intptr_at(method_offset);
return (Method*)intptr_at(speculative_trap_method);
}
void set_method(Method* m) {
set_intptr_at(method_offset, (intptr_t)m);
set_intptr_at(speculative_trap_method, (intptr_t)m);
}
static ByteSize method_offset() {
return cell_offset(speculative_trap_method);
}
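The rename separates a cell index (type_offset_in_cells) from a byte offset (the new ByteSize accessors such as method_offset()), which compiled code needs when it addresses profile cells directly. The layout arithmetic, sketched with assumed names and a fixed two-cell header:

#include <cstdint>
#include <cstdio>

// A ProfileData-like record: a small fixed header followed by intptr_t cells.
struct ProfileRecord {
  static const int header_cells = 2;            // assumed header size
  static int cell_offset_in_bytes(int cell) {   // cells -> bytes
    return (header_cells + cell) * (int)sizeof(intptr_t);
  }
};

enum { speculative_trap_method = 0 };           // cell index, as in the enum above

int main() {
  std::printf("method cell at byte offset %d\n",
              ProfileRecord::cell_offset_in_bytes(speculative_trap_method));
  return 0;
}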
virtual void print_data_on(outputStream* st, const char* extra = NULL) const;

View File

@ -391,7 +391,7 @@ void LateInlineCallGenerator::do_late_inline() {
}
// Setup default node notes to be picked up by the inlining
Node_Notes* old_nn = C->default_node_notes();
Node_Notes* old_nn = C->node_notes_at(call->_idx);
if (old_nn != NULL) {
Node_Notes* entry_nn = old_nn->clone(C);
entry_nn->set_jvms(jvms);

View File

@ -364,7 +364,7 @@ bool Compile::should_delay_string_inlining(ciMethod* call_method, JVMState* jvms
bool Compile::should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms) {
if (eliminate_boxing() && call_method->is_boxing_method()) {
set_has_boxed_value(true);
return true;
return aggressive_unboxing();
}
return false;
}

View File

@ -673,7 +673,7 @@ const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj
// / Region
//
Node* IfNode::fold_compares(PhaseGVN* phase) {
if (!phase->C->eliminate_boxing() || Opcode() != Op_If) return NULL;
if (Opcode() != Op_If) return NULL;
Node* this_cmp = in(1)->in(1);
if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI &&

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -420,7 +420,6 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
return NULL;
case vmIntrinsics::_getCallerClass:
if (!UseNewReflection) return NULL;
if (!InlineReflectionGetCallerClass) return NULL;
if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) return NULL;
break;

View File

@ -1393,6 +1393,15 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
_worklist.push(u);
}
}
// If changed AddI/SubI inputs, check CmpU for range check optimization.
if (use_op == Op_AddI || use_op == Op_SubI) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
if (u->is_Cmp() && (u->Opcode() == Op_CmpU)) {
_worklist.push(u);
}
}
}
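The new block re-queues CmpU users whenever an AddI or SubI feeding them changes, so the strengthened CmpU::Value later in this diff gets another chance to fold the compare. The general pattern is: when a node improves, push back the specific users that can exploit it. Below is a toy-graph sketch of that shape (illustrative, not C2's Node/worklist API):

#include <cstdio>
#include <cstring>
#include <queue>
#include <vector>

struct Node {
  const char* op;
  std::vector<Node*> outs;   // users of this node
};

// Revisit users of n; if a user is an AddI/SubI, also revisit its CmpU users.
static void add_users_to_worklist(Node* n, std::queue<Node*>& worklist) {
  for (Node* use : n->outs) {
    worklist.push(use);
    if (std::strcmp(use->op, "AddI") == 0 || std::strcmp(use->op, "SubI") == 0) {
      for (Node* u : use->outs)
        if (std::strcmp(u->op, "CmpU") == 0) worklist.push(u);
    }
  }
}

int main() {
  Node cmp{"CmpU", {}};
  Node add{"AddI", {&cmp}};
  Node con{"ConI", {&add}};
  std::queue<Node*> wl;
  add_users_to_worklist(&con, wl);
  std::printf("worklist size = %zu\n", wl.size());   // AddI plus its CmpU user
  return 0;
}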
// If changed AddP inputs, check Stores for loop invariant
if( use_op == Op_AddP ) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {

View File

@ -80,7 +80,7 @@ Node *SubNode::Identity( PhaseTransform *phase ) {
//------------------------------Value------------------------------------------
// A subtract node differences its two inputs.
const Type *SubNode::Value( PhaseTransform *phase ) const {
const Type* SubNode::Value_common(PhaseTransform *phase) const {
const Node* in1 = in(1);
const Node* in2 = in(2);
// Either input is TOP ==> the result is TOP
@ -97,6 +97,16 @@ const Type *SubNode::Value( PhaseTransform *phase ) const {
if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
return bottom_type();
return NULL;
}
const Type* SubNode::Value(PhaseTransform *phase) const {
const Type* t = Value_common(phase);
if (t != NULL) {
return t;
}
const Type* t1 = phase->type(in(1));
const Type* t2 = phase->type(in(2));
return sub(t1,t2); // Local flavor of type subtraction
}
@ -570,6 +580,81 @@ const Type *CmpUNode::sub( const Type *t1, const Type *t2 ) const {
return TypeInt::CC; // else use worst case results
}
const Type* CmpUNode::Value(PhaseTransform *phase) const {
const Type* t = SubNode::Value_common(phase);
if (t != NULL) {
return t;
}
const Node* in1 = in(1);
const Node* in2 = in(2);
const Type* t1 = phase->type(in1);
const Type* t2 = phase->type(in2);
assert(t1->isa_int(), "CmpU has only Int type inputs");
if (t2 == TypeInt::INT) { // Compare to bottom?
return bottom_type();
}
uint in1_op = in1->Opcode();
if (in1_op == Op_AddI || in1_op == Op_SubI) {
// The problem arises when the result of the AddI(SubI) may overflow
// the signed integer range. Say the input type is
// [256, maxint]; then +128 creates 2 ranges due to
// overflow: [minint, minint+127] and [384, maxint].
// But the C2 type system keeps only 1 type range and as a result
// uses the general [minint, maxint] for this case, which we
// can't optimize.
//
// Make 2 separate type ranges based on the types of the AddI(SubI) inputs
// and compare the results of their compares. If the results are the same,
// the CmpU node can be optimized.
const Node* in11 = in1->in(1);
const Node* in12 = in1->in(2);
const Type* t11 = (in11 == in1) ? Type::TOP : phase->type(in11);
const Type* t12 = (in12 == in1) ? Type::TOP : phase->type(in12);
// Skip cases when input types are top or bottom.
if ((t11 != Type::TOP) && (t11 != TypeInt::INT) &&
(t12 != Type::TOP) && (t12 != TypeInt::INT)) {
const TypeInt *r0 = t11->is_int();
const TypeInt *r1 = t12->is_int();
jlong lo_r0 = r0->_lo;
jlong hi_r0 = r0->_hi;
jlong lo_r1 = r1->_lo;
jlong hi_r1 = r1->_hi;
if (in1_op == Op_SubI) {
jlong tmp = hi_r1;
hi_r1 = -lo_r1;
lo_r1 = -tmp;
// Note: when subtracting a [minint,x] type range,
// long arithmetic provides the correct overflow answer.
// The confusion comes from the fact that in 32-bit
// -minint == minint, but in 64-bit -minint == maxint+1.
}
jlong lo_long = lo_r0 + lo_r1;
jlong hi_long = hi_r0 + hi_r1;
int lo_tr1 = min_jint;
int hi_tr1 = (int)hi_long;
int lo_tr2 = (int)lo_long;
int hi_tr2 = max_jint;
bool underflow = lo_long != (jlong)lo_tr2;
bool overflow = hi_long != (jlong)hi_tr1;
// Use sub(t1, t2) when there is no overflow (one type range)
// or when both overflow and underflow (too complex).
if ((underflow != overflow) && (hi_tr1 < lo_tr2)) {
// Overflow only on one boundary, compare 2 separate type ranges.
int w = MAX2(r0->_widen, r1->_widen); // _widen does not matter here
const TypeInt* tr1 = TypeInt::make(lo_tr1, hi_tr1, w);
const TypeInt* tr2 = TypeInt::make(lo_tr2, hi_tr2, w);
const Type* cmp1 = sub(tr1, t2);
const Type* cmp2 = sub(tr2, t2);
if (cmp1 == cmp2) {
return cmp1; // Hit!
}
}
}
}
return sub(t1, t2); // Local flavor of type subtraction
}
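A worked, standalone version of the split-range idea from the comment: [256, maxint] plus 128 overflows into [minint, minint+127] and [384, maxint], and an unsigned compare against a constant can still fold when both sub-ranges give the same answer. The sketch below redoes that arithmetic in 64-bit longs with toy types (not C2's TypeInt), assuming overflow on the high boundary only, as in the example:

#include <climits>
#include <cstdint>
#include <cstdio>

struct Range { int64_t lo, hi; };   // toy stand-in for TypeInt

// Unsigned-compare an int range against constant k:
// -1 = always below, +1 = always at-or-above, 0 = can't tell.
static int cmpu_range_vs_const(Range r, uint32_t k) {
  uint32_t lo = (uint32_t)(int32_t)r.lo, hi = (uint32_t)(int32_t)r.hi;
  if (hi < k)  return -1;
  if (lo >= k) return +1;
  return 0;
}

int main() {
  Range in = {256, INT_MAX};                   // type of the AddI input
  int64_t add = 128;
  int64_t lo_long = in.lo + add, hi_long = in.hi + add;
  bool overflow  = hi_long > INT_MAX;
  bool underflow = lo_long < INT_MIN;
  // Here the overflow is only on the high boundary, so split into two ranges.
  Range tr1 = {INT_MIN, hi_long - (1LL << 32)};   // wrapped part  [minint, minint+127]
  Range tr2 = {lo_long, INT_MAX};                 // straight part [384, maxint]
  uint32_t k = 100;                               // the CmpU constant
  int c1 = cmpu_range_vs_const(tr1, k);
  int c2 = cmpu_range_vs_const(tr2, k);
  std::printf("overflow=%d underflow=%d  cmp(tr1)=%d cmp(tr2)=%d  fold=%s\n",
              overflow, underflow, c1, c2,
              (c1 == c2 && c1 != 0) ? "yes" : "no");
  return 0;
}

With these inputs both sub-ranges compare the same way against 100, which is exactly the case the new CmpUNode::Value can now fold.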
bool CmpUNode::is_index_range_check() const {
// Check for the "(X ModI Y) CmpU Y" shape
return (in(1)->Opcode() == Op_ModI &&

View File

@ -50,6 +50,7 @@ public:
// Compute a new Type for this node. Basically we just do the pre-check,
// then call the virtual add() to set the type.
virtual const Type *Value( PhaseTransform *phase ) const;
const Type* Value_common( PhaseTransform *phase ) const;
// Supplied function returns the subtrahend of the inputs.
// This also type-checks the inputs for sanity. Guaranteed never to
@ -158,6 +159,7 @@ public:
CmpUNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {}
virtual int Opcode() const;
virtual const Type *sub( const Type *, const Type * ) const;
const Type *Value( PhaseTransform *phase ) const;
bool is_index_range_check() const;
};

View File

@ -199,6 +199,7 @@
# include "runtime/perfData.hpp"
# include "runtime/perfMemory.hpp"
# include "runtime/prefetch.hpp"
# include "runtime/prefetch.inline.hpp"
# include "runtime/reflection.hpp"
# include "runtime/reflectionUtils.hpp"
# include "runtime/registerMap.hpp"

View File

@ -544,7 +544,7 @@ JNI_ENTRY(jobject, jni_ToReflectedMethod(JNIEnv *env, jclass cls, jmethodID meth
if (m->is_initializer()) {
reflection_method = Reflection::new_constructor(m, CHECK_NULL);
} else {
reflection_method = Reflection::new_method(m, UseNewReflection, false, CHECK_NULL);
reflection_method = Reflection::new_method(m, false, CHECK_NULL);
}
ret = JNIHandles::make_local(env, reflection_method);
return ret;
@ -2272,7 +2272,7 @@ JNI_ENTRY(jobject, jni_ToReflectedField(JNIEnv *env, jclass cls, jfieldID fieldI
found = InstanceKlass::cast(k)->find_field_from_offset(offset, false, &fd);
}
assert(found, "bad fieldID passed into jni_ToReflectedField");
oop reflected = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
oop reflected = Reflection::new_field(&fd, CHECK_NULL);
ret = JNIHandles::make_local(env, reflected);
return ret;
JNI_END

View File

@ -1854,7 +1854,7 @@ JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredFields(JNIEnv *env, jclass ofClass,
if (!publicOnly || fs.access_flags().is_public()) {
fd.reinitialize(k(), fs.index());
oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
oop field = Reflection::new_field(&fd, CHECK_NULL);
result->obj_at_put(out_idx, field);
++out_idx;
}
@ -1932,7 +1932,7 @@ static jobjectArray get_class_declared_methods_helper(
if (want_constructor) {
m = Reflection::new_constructor(method, CHECK_NULL);
} else {
m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
m = Reflection::new_method(method, false, CHECK_NULL);
}
result->obj_at_put(i, m);
}
@ -2055,7 +2055,7 @@ static jobject get_method_at_helper(constantPoolHandle cp, jint index, bool forc
}
oop method;
if (!m->is_initializer() || m->is_static()) {
method = Reflection::new_method(m, true, true, CHECK_NULL);
method = Reflection::new_method(m, true, CHECK_NULL);
} else {
method = Reflection::new_constructor(m, CHECK_NULL);
}
@ -2105,7 +2105,7 @@ static jobject get_field_at_helper(constantPoolHandle cp, jint index, bool force
if (target_klass == NULL) {
THROW_MSG_0(vmSymbols::java_lang_RuntimeException(), "Unable to look up field in target class");
}
oop field = Reflection::new_field(&fd, true, CHECK_NULL);
oop field = Reflection::new_field(&fd, CHECK_NULL);
return JNIHandles::make_local(field);
}
@ -3521,7 +3521,6 @@ JVM_END
JVM_ENTRY(jobject, JVM_LatestUserDefinedLoader(JNIEnv *env))
for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
// UseNewReflection
vfst.skip_reflection_related_frames(); // Only needed for 1.4 reflection
oop loader = vfst.method()->method_holder()->class_loader();
if (loader != NULL) {

View File

@ -2970,7 +2970,8 @@ void VM_RedefineClasses::check_methods_and_mark_as_obsolete(
assert(!old_method->has_vtable_index(),
"cannot delete methods with vtable entries");;
// Mark all deleted methods as old and obsolete
// Mark all deleted methods as old, obsolete and deleted
old_method->set_is_deleted();
old_method->set_is_old();
old_method->set_is_obsolete();
++obsolete_count;
@ -3576,7 +3577,7 @@ void VM_RedefineClasses::CheckClass::do_klass(Klass* k) {
no_old_methods = false;
}
// the constant pool cache should never contain old or obsolete methods
// the constant pool cache should never contain non-deleted old or obsolete methods
if (ik->constants() != NULL &&
ik->constants()->cache() != NULL &&
!ik->constants()->cache()->check_no_old_or_obsolete_entries()) {

View File

@ -33,6 +33,7 @@
#include "prims/jvm.h"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/synchronizer.hpp"

View File

@ -312,6 +312,9 @@ static ObsoleteFlag obsolete_jvm_flags[] = {
{ "DefaultThreadPriority", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "NoYieldsInMicrolock", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "BackEdgeThreshold", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "UseNewReflection", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@ -581,11 +584,20 @@ char* SysClassPath::add_jars_to_path(char* path, const char* directory) {
// Parses a memory size specification string.
static bool atomull(const char *s, julong* result) {
julong n = 0;
int args_read = sscanf(s, JULONG_FORMAT, &n);
int args_read = 0;
bool is_hex = false;
// Skip leading 0[xX] for hexadecimal
if (*s =='0' && (*(s+1) == 'x' || *(s+1) == 'X')) {
s += 2;
is_hex = true;
args_read = sscanf(s, JULONG_FORMAT_X, &n);
} else {
args_read = sscanf(s, JULONG_FORMAT, &n);
}
if (args_read != 1) {
return false;
}
while (*s != '\0' && isdigit(*s)) {
while (*s != '\0' && (isdigit(*s) || (is_hex && isxdigit(*s)))) {
s++;
}
// 4705540: illegal if more characters are found after the first non-digit
@ -779,7 +791,7 @@ bool Arguments::parse_argument(const char* arg, Flag::Flags origin) {
}
}
#define VALUE_RANGE "[-kmgtKMGT0123456789]"
#define VALUE_RANGE "[-kmgtxKMGTX0123456789abcdefABCDEF]"
if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "=" "%" XSTR(BUFLEN) VALUE_RANGE "%c", name, value, &dummy) == 2) {
return set_numeric_flag(name, value, origin);
}
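The parser change accepts 0x/0X-prefixed values in -XX numeric flags, both in the sscanf conversion (JULONG_FORMAT_X) and in the trailing-character validation (isxdigit plus the widened VALUE_RANGE pattern). A standalone sketch of the same parse-then-validate shape; parse_julong is a hypothetical helper, and unlike the real atomull it does not handle the K/M/G/T size suffixes:

#include <cctype>
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Decimal or 0x-prefixed hex, rejecting trailing junk after the digits.
static bool parse_julong(const char* s, uint64_t* result) {
  bool is_hex = (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'));
  int read = is_hex ? std::sscanf(s + 2, "%" SCNx64, result)
                    : std::sscanf(s,     "%" SCNu64, result);
  if (read != 1) return false;
  const char* p = is_hex ? s + 2 : s;
  while (*p != '\0' && (std::isdigit((unsigned char)*p) ||
                        (is_hex && std::isxdigit((unsigned char)*p)))) {
    p++;
  }
  return *p == '\0';   // illegal if more characters follow the digits
}

int main() {
  uint64_t v = 0;
  std::printf("%d %" PRIu64 "\n", parse_julong("0x1000", &v), v);  // 1 4096
  std::printf("%d\n", parse_julong("0xZZ", &v));                   // 0
  return 0;
}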
@ -2363,6 +2375,9 @@ bool Arguments::check_vm_args_consistency() {
status = status && verify_percentage(MarkSweepDeadRatio, "MarkSweepDeadRatio");
status = status && verify_min_value(MarkSweepAlwaysCompactCount, 1, "MarkSweepAlwaysCompactCount");
#ifdef COMPILER1
status = status && verify_min_value(ValueMapInitialSize, 1, "ValueMapInitialSize");
#endif
if (PrintNMTStatistics) {
#if INCLUDE_NMT

View File

@ -1340,7 +1340,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
if (xtty != NULL)
xtty->name(class_name);
}
if (xtty != NULL && trap_mdo != NULL) {
if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
// Dump the relevant MDO state.
// This is the deopt count for the current reason, any previous
// reasons or recompiles seen at this point.
@ -1818,7 +1818,7 @@ const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
//--------------------------------statics--------------------------------------
const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
const char* Deoptimization::_trap_reason_name[] = {
// Note: Keep this in sync. with enum DeoptReason.
"none",
"null_check",
@ -1839,9 +1839,10 @@ const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
"loop_limit_check",
"speculate_class_check",
"speculate_null_check",
"rtm_state_change"
"rtm_state_change",
"tenured"
};
const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
const char* Deoptimization::_trap_action_name[] = {
// Note: Keep this in sync. with enum DeoptAction.
"none",
"maybe_recompile",
@ -1851,6 +1852,9 @@ const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
};
const char* Deoptimization::trap_reason_name(int reason) {
// Check that every reason has a name
STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
if (reason == Reason_many) return "many";
if ((uint)reason < Reason_LIMIT)
return _trap_reason_name[reason];
@ -1859,6 +1863,9 @@ const char* Deoptimization::trap_reason_name(int reason) {
return buf;
}
const char* Deoptimization::trap_action_name(int action) {
// Check that every action has a name
STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
if ((uint)action < Action_LIMIT)
return _trap_action_name[action];
static char buf[20];

View File

@ -376,8 +376,8 @@ class Deoptimization : AllStatic {
static UnrollBlock* fetch_unroll_info_helper(JavaThread* thread);
static DeoptAction _unloaded_action; // == Action_reinterpret;
static const char* _trap_reason_name[Reason_LIMIT];
static const char* _trap_action_name[Action_LIMIT];
static const char* _trap_reason_name[];
static const char* _trap_action_name[];
static juint _deoptimization_hist[Reason_LIMIT][1+Action_LIMIT][BC_CASE_LIMIT];
// Note: Histogram array size is 1-2 Kb.
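Dropping the explicit array bounds and adding the STATIC_ASSERTs lets the compiler, rather than code review, verify that _trap_reason_name and _trap_action_name stay in sync with their enums when entries such as Reason_tenured are added. The same pattern with standard static_assert and a toy enum:

#include <cstdio>

enum DeoptReason { Reason_none, Reason_null_check, Reason_tenured, Reason_LIMIT };

// Unsized: the initializer determines the length; the assert enforces it.
static const char* const trap_reason_name[] = {
  "none", "null_check", "tenured"
};

static const char* reason_name(int reason) {
  static_assert(sizeof(trap_reason_name) / sizeof(const char*) == Reason_LIMIT,
                "every DeoptReason needs a name");
  return ((unsigned)reason < (unsigned)Reason_LIMIT) ? trap_reason_name[reason]
                                                     : "unknown";
}

int main() {
  std::printf("%s\n", reason_name(Reason_tenured));
  return 0;
}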

View File

@ -3656,22 +3656,6 @@ class CommandLineFlags {
\
/* New JDK 1.4 reflection implementation */ \
\
develop(bool, UseNewReflection, true, \
"Temporary flag for transition to reflection based on dynamic " \
"bytecode generation in 1.4; can no longer be turned off in 1.4 " \
"JDK, and is unneeded in 1.3 JDK, but marks most places VM " \
"changes were needed") \
\
develop(bool, VerifyReflectionBytecodes, false, \
"Force verification of 1.4 reflection bytecodes. Does not work " \
"in situations like that described in 4486457 or for " \
"constructors generated for serialization, so can not be enabled "\
"in product.") \
\
product(bool, ReflectionWrapResolutionErrors, true, \
"Temporary flag for transition to AbstractMethodError wrapped " \
"in InvocationTargetException. See 6531596") \
\
develop(intx, FastSuperclassLimit, 8, \
"Depth of hardwired instanceof accelerator array") \
\

View File

@ -1097,11 +1097,15 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
}
// Check if in metaspace.
if (ClassLoaderDataGraph::contains((address)addr)) {
// Use addr->print() from the debugger instead (not here)
st->print_cr(INTPTR_FORMAT
" is pointing into metadata", addr);
// Check if in metaspace and print types that have vptrs (only method now)
if (Metaspace::contains(addr)) {
if (Method::has_method_vptr((const void*)addr)) {
((Method*)addr)->print_value_on(st);
st->cr();
} else {
// Use addr->print() from the debugger instead (not here)
st->print_cr(INTPTR_FORMAT " is pointing into metadata", addr);
}
return;
}

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
#define SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP
#include "runtime/prefetch.hpp"
// Linux
#ifdef TARGET_OS_ARCH_linux_x86
# include "prefetch_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "prefetch_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "prefetch_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "prefetch_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "prefetch_linux_ppc.inline.hpp"
#endif
// Solaris
#ifdef TARGET_OS_ARCH_solaris_x86
# include "prefetch_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "prefetch_solaris_sparc.inline.hpp"
#endif
// Windows
#ifdef TARGET_OS_ARCH_windows_x86
# include "prefetch_windows_x86.inline.hpp"
#endif
// AIX
#ifdef TARGET_OS_ARCH_aix_ppc
# include "prefetch_aix_ppc.inline.hpp"
#endif
// BSD
#ifdef TARGET_OS_ARCH_bsd_x86
# include "prefetch_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "prefetch_bsd_zero.inline.hpp"
#endif
#endif // SHARE_VM_RUNTIME_PREFETCH_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -466,7 +466,6 @@ bool Reflection::verify_class_access(Klass* current_class, Klass* new_class, boo
// New (1.4) reflection implementation. Allow all accesses from
// sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
if ( JDK_Version::is_gte_jdk14x_version()
&& UseNewReflection
&& current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
return true;
}
@ -571,7 +570,6 @@ bool Reflection::verify_field_access(Klass* current_class,
// New (1.4) reflection implementation. Allow all accesses from
// sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
if ( JDK_Version::is_gte_jdk14x_version()
&& UseNewReflection
&& current_class->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
return true;
}
@ -708,7 +706,7 @@ Handle Reflection::new_type(Symbol* signature, KlassHandle k, TRAPS) {
}
oop Reflection::new_method(methodHandle method, bool intern_name, bool for_constant_pool_access, TRAPS) {
oop Reflection::new_method(methodHandle method, bool for_constant_pool_access, TRAPS) {
// In jdk1.2.x, getMethods on an interface erroneously includes <clinit>, thus the complicated assert.
// Also allow sun.reflect.ConstantPool to refer to <clinit> methods as java.lang.reflect.Methods.
assert(!method()->is_initializer() ||
@ -731,14 +729,8 @@ oop Reflection::new_method(methodHandle method, bool intern_name, bool for_const
if (exception_types.is_null()) return NULL;
Symbol* method_name = method->name();
Handle name;
if (intern_name) {
// intern_name is only true with UseNewReflection
oop name_oop = StringTable::intern(method_name, CHECK_NULL);
name = Handle(THREAD, name_oop);
} else {
name = java_lang_String::create_from_symbol(method_name, CHECK_NULL);
}
oop name_oop = StringTable::intern(method_name, CHECK_NULL);
Handle name = Handle(THREAD, name_oop);
if (name == NULL) return NULL;
int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS;
@ -825,16 +817,10 @@ oop Reflection::new_constructor(methodHandle method, TRAPS) {
}
oop Reflection::new_field(fieldDescriptor* fd, bool intern_name, TRAPS) {
oop Reflection::new_field(fieldDescriptor* fd, TRAPS) {
Symbol* field_name = fd->name();
Handle name;
if (intern_name) {
// intern_name is only true with UseNewReflection
oop name_oop = StringTable::intern(field_name, CHECK_NULL);
name = Handle(THREAD, name_oop);
} else {
name = java_lang_String::create_from_symbol(field_name, CHECK_NULL);
}
oop name_oop = StringTable::intern(field_name, CHECK_NULL);
Handle name = Handle(THREAD, name_oop);
Symbol* signature = fd->signature();
instanceKlassHandle holder (THREAD, fd->field_holder());
Handle type = new_type(signature, holder, CHECK_NULL);
@ -933,27 +919,23 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
// resolve based on the receiver
if (reflected_method->method_holder()->is_interface()) {
// resolve interface call
if (ReflectionWrapResolutionErrors) {
// new default: 6531596
// Match resolution errors with those thrown due to reflection inlining
// Linktime resolution & IllegalAccessCheck already done by Class.getMethod()
method = resolve_interface_call(klass, reflected_method, target_klass, receiver, THREAD);
if (HAS_PENDING_EXCEPTION) {
// Method resolution threw an exception; wrap it in an InvocationTargetException
oop resolution_exception = PENDING_EXCEPTION;
CLEAR_PENDING_EXCEPTION;
// JVMTI has already reported the pending exception
// JVMTI internal flag reset is needed in order to report InvocationTargetException
if (THREAD->is_Java_thread()) {
JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
}
JavaCallArguments args(Handle(THREAD, resolution_exception));
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
&args);
//
// Match resolution errors with those thrown due to reflection inlining
// Linktime resolution & IllegalAccessCheck already done by Class.getMethod()
method = resolve_interface_call(klass, reflected_method, target_klass, receiver, THREAD);
if (HAS_PENDING_EXCEPTION) {
// Method resolution threw an exception; wrap it in an InvocationTargetException
oop resolution_exception = PENDING_EXCEPTION;
CLEAR_PENDING_EXCEPTION;
// JVMTI has already reported the pending exception
// JVMTI internal flag reset is needed in order to report InvocationTargetException
if (THREAD->is_Java_thread()) {
JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
}
} else {
method = resolve_interface_call(klass, reflected_method, target_klass, receiver, CHECK_(NULL));
JavaCallArguments args(Handle(THREAD, resolution_exception));
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
&args);
}
} else {
// if the method can be overridden, we resolve using the vtable index.
@ -970,24 +952,16 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
// Check for abstract methods as well
if (method->is_abstract()) {
// new default: 6531596
if (ReflectionWrapResolutionErrors) {
ResourceMark rm(THREAD);
Handle h_origexception = Exceptions::new_exception(THREAD,
vmSymbols::java_lang_AbstractMethodError(),
Method::name_and_sig_as_C_string(target_klass(),
method->name(),
method->signature()));
JavaCallArguments args(h_origexception);
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
&args);
} else {
ResourceMark rm(THREAD);
THROW_MSG_0(vmSymbols::java_lang_AbstractMethodError(),
Method::name_and_sig_as_C_string(target_klass(),
method->name(),
method->signature()));
}
ResourceMark rm(THREAD);
Handle h_origexception = Exceptions::new_exception(THREAD,
vmSymbols::java_lang_AbstractMethodError(),
Method::name_and_sig_as_C_string(target_klass(),
method->name(),
method->signature()));
JavaCallArguments args(h_origexception);
THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
vmSymbols::throwable_void_signature(),
&args);
}
}
}
@ -1006,7 +980,7 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
// In the JDK 1.4 reflection implementation, the security check is
// done at the Java level
if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
if (!JDK_Version::is_gte_jdk14x_version()) {
// Access checking (unless overridden by Method)
if (!override) {
@ -1018,7 +992,7 @@ oop Reflection::invoke(instanceKlassHandle klass, methodHandle reflected_method,
}
}
} // !(Universe::is_gte_jdk14x_version() && UseNewReflection)
} // !Universe::is_gte_jdk14x_version()
assert(ptypes->is_objArray(), "just checking");
int args_len = args.is_null() ? 0 : args->length();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -113,11 +113,11 @@ class Reflection: public AllStatic {
//
// Create a java.lang.reflect.Method object based on a method
static oop new_method(methodHandle method, bool intern_name, bool for_constant_pool_access, TRAPS);
static oop new_method(methodHandle method, bool for_constant_pool_access, TRAPS);
// Create a java.lang.reflect.Constructor object based on a method
static oop new_constructor(methodHandle method, TRAPS);
// Create a java.lang.reflect.Field object based on a field descriptor
static oop new_field(fieldDescriptor* fd, bool intern_name, TRAPS);
static oop new_field(fieldDescriptor* fd, TRAPS);
// Create a java.lang.reflect.Parameter object based on a
// MethodParameterElement
static oop new_parameter(Handle method, int index, Symbol* sym,

View File

@ -541,6 +541,13 @@ void SafepointSynchronize::do_cleanup_tasks() {
gclog_or_tty->rotate_log(false);
}
{
// CMS delays purging the CLDG until the beginning of the next safepoint, to
// make sure the concurrent sweep is done
TraceTime t7("purging class loader data graph", TraceSafepointCleanupTime);
ClassLoaderDataGraph::purge_if_needed();
}
if (MemTracker::is_on()) {
MemTracker::sync();
}

View File

@ -473,7 +473,7 @@ void vframeStreamCommon::skip_prefixed_method_and_wrappers() {
void vframeStreamCommon::skip_reflection_related_frames() {
while (!at_end() &&
(JDK_Version::is_gte_jdk14x_version() && UseNewReflection &&
(JDK_Version::is_gte_jdk14x_version() &&
(method()->method_holder()->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass()) ||
method()->method_holder()->is_subclass_of(SystemDictionary::reflect_ConstructorAccessorImpl_klass())))) {
next();

View File

@ -354,6 +354,7 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
nonstatic_field(MethodData, _method, Method*) \
nonstatic_field(MethodData, _data_size, int) \
nonstatic_field(MethodData, _data[0], intptr_t) \
nonstatic_field(MethodData, _parameters_type_data_di, int) \
nonstatic_field(MethodData, _nof_decompiles, uint) \
nonstatic_field(MethodData, _nof_overflow_recompiles, uint) \
nonstatic_field(MethodData, _nof_overflow_traps, uint) \
@ -2500,6 +2501,10 @@ typedef TwoOopHashtable<Symbol*, mtClass> SymbolTwoOopHashtable;
declare_constant(Deoptimization::Reason_age) \
declare_constant(Deoptimization::Reason_predicate) \
declare_constant(Deoptimization::Reason_loop_limit_check) \
declare_constant(Deoptimization::Reason_speculate_class_check) \
declare_constant(Deoptimization::Reason_speculate_null_check) \
declare_constant(Deoptimization::Reason_rtm_state_change) \
declare_constant(Deoptimization::Reason_tenured) \
declare_constant(Deoptimization::Reason_LIMIT) \
declare_constant(Deoptimization::Reason_RECORDED_LIMIT) \
\

View File

@ -66,7 +66,7 @@ class TraceStream : public StackObj {
}
void print_val(const char* label, s8 val) {
_st.print("%s = "INT64_FORMAT, label, val);
_st.print("%s = "INT64_FORMAT, label, (int64_t) val);
}
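The added cast makes the argument type match INT64_FORMAT exactly; if s8 happens to be long long while the macro expands to %ld (or the reverse), the unmatched vararg call is undefined behavior. The portable equivalent with the standard <cinttypes> macros (standing in for HotSpot's INT64_FORMAT):

#include <cinttypes>
#include <cstdio>

int main() {
  long long val = 42;                           // stand-in for the s8 field
  // Cast to the exact type the format macro was written for.
  std::printf("depth = %" PRId64 "\n", (int64_t)val);
  return 0;
}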
void print_val(const char* label, bool val) {

View File

@ -54,7 +54,8 @@ enum {
JVM_ACC_IS_OLD = 0x00010000, // RedefineClasses() has replaced this method
JVM_ACC_IS_OBSOLETE = 0x00020000, // RedefineClasses() has made method obsolete
JVM_ACC_IS_PREFIXED_NATIVE = 0x00040000, // JVMTI has prefixed this native method
JVM_ACC_ON_STACK = 0x00080000, // RedefinedClasses() is used on the stack
JVM_ACC_ON_STACK = 0x00080000, // RedefineClasses() was used on the stack
JVM_ACC_IS_DELETED = 0x00008000, // RedefineClasses() has deleted this method
// Klass* flags
JVM_ACC_HAS_MIRANDA_METHODS = 0x10000000, // True if this class has miranda methods in its vtable
@ -131,6 +132,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
bool has_jsrs () const { return (_flags & JVM_ACC_HAS_JSRS ) != 0; }
bool is_old () const { return (_flags & JVM_ACC_IS_OLD ) != 0; }
bool is_obsolete () const { return (_flags & JVM_ACC_IS_OBSOLETE ) != 0; }
bool is_deleted () const { return (_flags & JVM_ACC_IS_DELETED ) != 0; }
bool is_prefixed_native () const { return (_flags & JVM_ACC_IS_PREFIXED_NATIVE ) != 0; }
// Klass* flags
@ -195,6 +197,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
void set_has_jsrs() { atomic_set_bits(JVM_ACC_HAS_JSRS); }
void set_is_old() { atomic_set_bits(JVM_ACC_IS_OLD); }
void set_is_obsolete() { atomic_set_bits(JVM_ACC_IS_OBSOLETE); }
void set_is_deleted() { atomic_set_bits(JVM_ACC_IS_DELETED); }
void set_is_prefixed_native() { atomic_set_bits(JVM_ACC_IS_PREFIXED_NATIVE); }
void clear_not_c1_compilable() { atomic_clear_bits(JVM_ACC_NOT_C1_COMPILABLE); }
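JVM_ACC_IS_DELETED follows the existing access-flag pattern: one bit in the flags word, a const test accessor, and an atomic setter so concurrent readers never observe a torn update. A standalone sketch of that idiom with std::atomic (flag values copied from the hunk above, class shape simplified):

#include <atomic>
#include <cstdio>

enum : unsigned {
  ACC_IS_OLD      = 0x00010000,
  ACC_IS_OBSOLETE = 0x00020000,
  ACC_IS_DELETED  = 0x00008000
};

class AccessFlags {
  std::atomic<unsigned> _flags{0};
  void atomic_set_bits(unsigned bits) { _flags.fetch_or(bits); }
public:
  bool is_deleted() const { return (_flags.load() & ACC_IS_DELETED) != 0; }
  void set_is_deleted()   { atomic_set_bits(ACC_IS_DELETED); }
};

int main() {
  AccessFlags f;
  f.set_is_deleted();
  std::printf("deleted=%d\n", f.is_deleted());
  return 0;
}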

View File

@ -1346,6 +1346,9 @@ inline intptr_t p2i(const void * p) {
#ifndef JULONG_FORMAT
#define JULONG_FORMAT UINT64_FORMAT
#endif
#ifndef JULONG_FORMAT_X
#define JULONG_FORMAT_X UINT64_FORMAT_X
#endif
// Format pointers which change size between 32- and 64-bit.
#ifdef _LP64
@ -1385,7 +1388,7 @@ inline intptr_t p2i(const void * p) {
// All C++ compilers that we know of have the vtbl pointer in the first
// word. If there are exceptions, this function needs to be made compiler
// specific.
static inline void* dereference_vptr(void* addr) {
static inline void* dereference_vptr(const void* addr) {
return *(void**)addr;
}

View File

@ -378,21 +378,22 @@ const char* AbstractGangTask::name() const {
WorkGangBarrierSync::WorkGangBarrierSync()
: _monitor(Mutex::safepoint, "work gang barrier sync", true),
_n_workers(0), _n_completed(0), _should_reset(false) {
_n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) {
}
WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
: _monitor(Mutex::safepoint, name, true),
_n_workers(n_workers), _n_completed(0), _should_reset(false) {
_n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) {
}
void WorkGangBarrierSync::set_n_workers(uint n_workers) {
_n_workers = n_workers;
_n_completed = 0;
_n_workers = n_workers;
_n_completed = 0;
_should_reset = false;
_aborted = false;
}
void WorkGangBarrierSync::enter() {
bool WorkGangBarrierSync::enter() {
MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
if (should_reset()) {
// The should_reset() was set and we are the first worker to enter
@ -415,10 +416,17 @@ void WorkGangBarrierSync::enter() {
set_should_reset(true);
monitor()->notify_all();
} else {
while (n_completed() != n_workers()) {
while (n_completed() != n_workers() && !aborted()) {
monitor()->wait(/* no_safepoint_check */ true);
}
}
return !aborted();
}
void WorkGangBarrierSync::abort() {
MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
set_aborted();
monitor()->notify_all();
}
// SubTasksDone functions.

View File

@ -359,18 +359,20 @@ class FlexibleWorkGang: public WorkGang {
class WorkGangBarrierSync : public StackObj {
protected:
Monitor _monitor;
uint _n_workers;
uint _n_completed;
uint _n_workers;
uint _n_completed;
bool _should_reset;
bool _aborted;
Monitor* monitor() { return &_monitor; }
uint n_workers() { return _n_workers; }
uint n_completed() { return _n_completed; }
bool should_reset() { return _should_reset; }
bool aborted() { return _aborted; }
void zero_completed() { _n_completed = 0; }
void inc_completed() { _n_completed++; }
void set_aborted() { _aborted = true; }
void set_should_reset(bool v) { _should_reset = v; }
public:
@ -383,8 +385,14 @@ public:
// Enter the barrier. A worker that enters the barrier will
// not be allowed to leave until all other threads have
// also entered the barrier.
void enter();
// also entered the barrier or the barrier is aborted.
// Returns false if the barrier was aborted.
bool enter();
// Aborts the barrier and wakes up any threads waiting for
// the barrier to complete. The barrier will remain in the
// aborted state until the next call to set_n_workers().
void abort();
};
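The abort() addition lets a stuck synchronization be broken: workers block until either all of them have arrived or someone aborts the barrier, and enter() now reports which of the two happened. The protocol sketched with a std::condition_variable instead of HotSpot's Monitor (the _should_reset reuse logic is omitted):

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class BarrierSync {
  std::mutex _m;
  std::condition_variable _cv;
  unsigned _n_workers;
  unsigned _n_completed = 0;
  bool _aborted = false;
public:
  explicit BarrierSync(unsigned n) : _n_workers(n) {}
  // Returns false if the barrier was aborted while waiting.
  bool enter() {
    std::unique_lock<std::mutex> lk(_m);
    if (++_n_completed == _n_workers) {
      _cv.notify_all();
    } else {
      _cv.wait(lk, [&] { return _n_completed == _n_workers || _aborted; });
    }
    return !_aborted;
  }
  void abort() {
    std::lock_guard<std::mutex> lk(_m);
    _aborted = true;
    _cv.notify_all();
  }
};

int main() {
  BarrierSync sync(3);                     // 3 expected, only 2 will show up
  std::vector<std::thread> workers;
  for (int i = 0; i < 2; i++) {
    workers.emplace_back([&] { std::printf("enter -> %d\n", (int)sync.enter()); });
  }
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  sync.abort();                            // wake the stalled workers
  for (auto& t : workers) t.join();
  return 0;
}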
// A class to manage claiming of subtasks within a group of tasks. The

View File

@ -0,0 +1,107 @@
/*
* Copyright 2014 Google, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8043354
* @summary bcEscapeAnalyzer allocated_escapes not conservative enough
* @run main/othervm -XX:CompileOnly=.visitAndPop TestAllocatedEscapesPtrComparison
* @author Chuck Rasbold rasbold@google.com
*/
/*
* Test always passes with -XX:-OptimizePtrCompare
*/
import java.util.ArrayList;
import java.util.List;
public class TestAllocatedEscapesPtrComparison {
static TestAllocatedEscapesPtrComparison dummy;
class Marker {
}
List<Marker> markerList = new ArrayList<>();
// Suppress compilation of this method; it must be processed
// by the bytecode escape analyzer.
// Make a new marker and put it on the List
Marker getMarker() {
// result escapes through markerList
final Marker result = new Marker();
markerList.add(result);
return result;
}
void visit(int depth) {
// Make a new marker
getMarker();
// Call visitAndPop every once in a while
// Cap the depth of our recursive visits
if (depth % 10 == 2) {
visitAndPop(depth + 1);
} else if (depth < 15) {
visit(depth + 1);
}
}
void visitAndPop(int depth) {
// Random dummy allocation to force EscapeAnalysis to process this method
dummy = new TestAllocatedEscapesPtrComparison();
// Make a new marker
Marker marker = getMarker();
visit(depth + 1);
// Walk and pop the marker list up to the current marker
boolean found = false;
for (int i = markerList.size() - 1; i >= 0; i--) {
Marker removed = markerList.remove(i);
// In the failure, EA mistakenly converts this comparison to false
if (removed == marker) {
found = true;
break;
}
}
if (!found) {
throw new RuntimeException("test fails");
}
}
public static void main(String args[]) {
TestAllocatedEscapesPtrComparison tc = new TestAllocatedEscapesPtrComparison();
// Warmup and run enough times
for (int i = 0; i < 20000; i++) {
tc.visit(0);
}
}
}

View File

@ -26,7 +26,6 @@
##
## @test
## @bug 8011675
## @ignore 8032498
## @summary testing of ciReplay using a replay.txt generated by the VM
## @author igor.ignatyev@oracle.com
## @run shell TestVM.sh

View File

@ -26,7 +26,6 @@
##
## @test
## @bug 8011675
## @ignore 8032498
## @summary testing of ciReplay using a replay.txt generated by the VM, w/o comp_level
## @author igor.ignatyev@oracle.com
## @run shell TestVM_no_comp_level.sh

View File

@ -234,6 +234,12 @@ generate_replay() {
sed -e 's/.*location: //'`
echo CRASH OUTPUT:
cat crash.out
if [ "${core_locations}" = "" ]
then
test_fail 2 "CHECK :: CORE_LOCATION" "output doesn't contain the location of core file, see crash.out"
fi
rm crash.out
# processing core locations for *nix

View File

@ -21,6 +21,11 @@
* questions.
*/
import static com.oracle.java.testlibrary.Asserts.assertEQ;
import static com.oracle.java.testlibrary.Asserts.assertFalse;
import static com.oracle.java.testlibrary.Asserts.assertTrue;
import com.oracle.java.testlibrary.DynamicVMOption;
/**
* @test TestDynMaxHeapFreeRatio
* @bug 8028391
@ -33,32 +38,45 @@
* @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMaxHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMaxHeapFreeRatio
*/
import com.oracle.java.testlibrary.TestDynamicVMOption;
import com.oracle.java.testlibrary.DynamicVMOptionChecker;
public class TestDynMaxHeapFreeRatio extends TestDynamicVMOption {
public static final String MinFreeRatioFlagName = "MinHeapFreeRatio";
public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio";
public TestDynMaxHeapFreeRatio() {
super(MaxFreeRatioFlagName);
}
public void test() {
int minHeapFreeValue = DynamicVMOptionChecker.getIntValue(MinFreeRatioFlagName);
System.out.println(MinFreeRatioFlagName + " = " + minHeapFreeValue);
testPercentageValues();
checkInvalidValue(Integer.toString(minHeapFreeValue - 1));
checkValidValue(Integer.toString(minHeapFreeValue));
checkValidValue("100");
}
public class TestDynMaxHeapFreeRatio {
public static void main(String args[]) throws Exception {
new TestDynMaxHeapFreeRatio().test();
}
// low boundary value
int minValue = DynamicVMOption.getInt("MinHeapFreeRatio");
System.out.println("MinHeapFreeRatio= " + minValue);
String badValues[] = {
null,
"",
"not a number",
"8.5", "-0.01",
Integer.toString(Integer.MIN_VALUE),
Integer.toString(Integer.MAX_VALUE),
Integer.toString(minValue - 1),
"-1024", "-1", "101", "1997"
};
String goodValues[] = {
Integer.toString(minValue),
Integer.toString(minValue + 1),
Integer.toString((minValue + 100) / 2),
"99", "100"
};
DynamicVMOption option = new DynamicVMOption("MaxHeapFreeRatio");
assertTrue(option.isWriteable(), "Option " + option.name
+ " is expected to be writable");
for (String v : badValues) {
assertFalse(option.isValidValue(v),
"'" + v + "' is expected to be illegal for flag " + option.name);
}
for (String v : goodValues) {
option.setValue(v);
String newValue = option.getValue();
assertEQ(v, newValue);
}
}
}

View File

@ -33,30 +33,52 @@
* @run main/othervm -XX:MinHeapFreeRatio=51 -XX:MaxHeapFreeRatio=52 TestDynMinHeapFreeRatio
* @run main/othervm -XX:MinHeapFreeRatio=75 -XX:MaxHeapFreeRatio=100 TestDynMinHeapFreeRatio
*/
import com.oracle.java.testlibrary.TestDynamicVMOption;
import com.oracle.java.testlibrary.DynamicVMOptionChecker;
import static com.oracle.java.testlibrary.Asserts.assertEQ;
import static com.oracle.java.testlibrary.Asserts.assertFalse;
import static com.oracle.java.testlibrary.Asserts.assertTrue;
import com.oracle.java.testlibrary.DynamicVMOption;
public class TestDynMinHeapFreeRatio extends TestDynamicVMOption {
public static final String MinFreeRatioFlagName = "MinHeapFreeRatio";
public static final String MaxFreeRatioFlagName = "MaxHeapFreeRatio";
public TestDynMinHeapFreeRatio() {
super(MinFreeRatioFlagName);
}
public void test() {
int maxHeapFreeValue = DynamicVMOptionChecker.getIntValue(MaxFreeRatioFlagName);
System.out.println(MaxFreeRatioFlagName + " = " + maxHeapFreeValue);
testPercentageValues();
checkInvalidValue(Integer.toString(maxHeapFreeValue + 1));
checkValidValue(Integer.toString(maxHeapFreeValue));
checkValidValue("0");
}
public class TestDynMinHeapFreeRatio {
public static void main(String args[]) throws Exception {
new TestDynMinHeapFreeRatio().test();
// high boundary value
int maxValue = DynamicVMOption.getInt("MaxHeapFreeRatio");
System.out.println("MaxHeapFreeRatio= " + maxValue);
String badValues[] = {
null,
"",
"not a number",
"8.5", "-0.01",
Integer.toString(Integer.MIN_VALUE),
Integer.toString(Integer.MAX_VALUE),
Integer.toString(maxValue + 1),
"-1024", "-1", "101", "1997"
};
String goodValues[] = {
Integer.toString(maxValue),
Integer.toString(maxValue - 1),
Integer.toString(maxValue / 2),
"0", "1"
};
// option under test
DynamicVMOption option = new DynamicVMOption("MinHeapFreeRatio");
assertTrue(option.isWriteable(), "Option " + option.name
+ " is expected to be writable");
for (String v : badValues) {
assertFalse(option.isValidValue(v),
"'" + v + "' is expected to be illegal for flag " + option.name);
}
for (String v : goodValues) {
option.setValue(v);
String newValue = option.getValue();
assertEQ(v, newValue);
}
}
}

Some files were not shown because too many files have changed in this diff.