Mirror of https://github.com/openjdk/jdk.git
Merge commit 93ed663cf0
@ -44,6 +44,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
@ -63,11 +64,18 @@ import java.util.regex.Pattern;
*
* <h2>Tables: row headings</h2>
*
* {@code scope="row"} is added to the {@code <td>} elements in the first
* column whose cell contents are all different and therefore which can be
* used to identify the row. In case of ambiguity, a column containing
* a {@code <th>} whose contents begin <em>name</em> is preferred.
* For simple tables, as typically generated by _pandoc_, determine the column
* whose contents are unique, and convert the cells in that column to be header
* cells with {@code scope="row"}. In case of ambiguity, a column containing a
* {@code <th>} whose contents begin with <em>name</em> is preferred.
* When converting the cell, the {@code style} attribute will be updated to
* specify {@code font-weight: normal}, and if there is not already an explicit
* setting for {@code text-align}, then the style will be updated to include
* {@code text-align:left;}.
*
* These rules do not apply if the table contains any cells that include
* a setting for the {@code scope} attribute, or if the table contains
* spanning cells or nested tables.
*
* <h2>{@code <meta name="generator">}</h2>
*
@ -533,12 +541,39 @@ public class Main {
}
index++;
}
boolean updateEndTd = false;
Pattern styleAttr = Pattern.compile("(?<before>.*style=\")(?<style>[^\"]*)(?<after>\".*)");
for (Entry e : entries) {
if (simple && e.column == maxIndex) {
out.write(e.html.substring(0, e.html.length() - 1));
out.write(" scope=\"row\">");
String attrs = e.html.substring(3, e.html.length() - 1);
out.write("<th");
Matcher m = styleAttr.matcher(attrs);
if (m.matches()) {
out.write(m.group("before"));
out.write("font-weight: normal; ");
String style = m.group("style");
if (!style.contains("text-align")) {
out.write("text-align: left; ");
}
out.write(style);
out.write(m.group("after"));
} else {
out.write(" style=\"font-weight: normal; text-align:left;\" ");
out.write(attrs);
}
out.write(" scope=\"row\"");
out.write(">");
updateEndTd = true;
} else if (updateEndTd && e.html.equalsIgnoreCase("</td>")) {
out.write("</th>");
updateEndTd = false;
} else {
out.write(e.html);
if (updateEndTd && e.html.regionMatches(true, 0, "<td", 0, 3)) {
// a new cell has been started without explicitly closing the
// cell that was being updated
updateEndTd = false;
}
}
}
}
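A minimal before/after illustration of the rewrite performed by the code above, on a hypothetical cell (the cell text and style are invented for the example; only the shape of the transformation comes from the code): a first-column cell such as

    <td style="text-align:center;">JDK 13</td>

becomes

    <th style="font-weight: normal; text-align:center;" scope="row">JDK 13</th>

and its matching </td> is rewritten to </th>; a cell with no style attribute instead gets style="font-weight: normal; text-align:left;" inserted before scope="row".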
src/hotspot/cpu/aarch64/aarch64-asmtest.py: new file, 1177 lines (diff suppressed because it is too large)
@ -306,10 +306,12 @@ public:
|
||||
};
|
||||
class Post : public PrePost {
|
||||
Register _idx;
|
||||
bool _is_postreg;
|
||||
public:
|
||||
Post(Register reg, int o) : PrePost(reg, o) { _idx = NULL; }
|
||||
Post(Register reg, Register idx) : PrePost(reg, 0) { _idx = idx; }
|
||||
Post(Register reg, int o) : PrePost(reg, o) { _idx = NULL; _is_postreg = false; }
|
||||
Post(Register reg, Register idx) : PrePost(reg, 0) { _idx = idx; _is_postreg = true; }
|
||||
Register idx_reg() { return _idx; }
|
||||
bool is_postreg() {return _is_postreg; }
|
||||
};
|
||||
|
||||
namespace ext
|
||||
@ -393,7 +395,7 @@ class Address {
|
||||
: _base(p.reg()), _offset(p.offset()), _mode(pre) { }
|
||||
Address(Post p)
|
||||
: _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
|
||||
_mode(p.idx_reg() == NULL ? post : post_reg), _target(0) { }
|
||||
_mode(p.is_postreg() ? post_reg : post), _target(0) { }
|
||||
Address(address target, RelocationHolder const& rspec)
|
||||
: _mode(literal),
|
||||
_rspec(rspec),
|
||||
@ -807,32 +809,34 @@ public:
|
||||
#undef INSN
|
||||
|
||||
// Bitfield
|
||||
#define INSN(NAME, opcode) \
|
||||
#define INSN(NAME, opcode, size) \
|
||||
void NAME(Register Rd, Register Rn, unsigned immr, unsigned imms) { \
|
||||
starti; \
|
||||
guarantee(size == 1 || (immr < 32 && imms < 32), "incorrect immr/imms");\
|
||||
f(opcode, 31, 22), f(immr, 21, 16), f(imms, 15, 10); \
|
||||
zrf(Rn, 5), rf(Rd, 0); \
|
||||
}
|
||||
|
||||
INSN(sbfmw, 0b0001001100);
|
||||
INSN(bfmw, 0b0011001100);
|
||||
INSN(ubfmw, 0b0101001100);
|
||||
INSN(sbfm, 0b1001001101);
|
||||
INSN(bfm, 0b1011001101);
|
||||
INSN(ubfm, 0b1101001101);
|
||||
INSN(sbfmw, 0b0001001100, 0);
|
||||
INSN(bfmw, 0b0011001100, 0);
|
||||
INSN(ubfmw, 0b0101001100, 0);
|
||||
INSN(sbfm, 0b1001001101, 1);
|
||||
INSN(bfm, 0b1011001101, 1);
|
||||
INSN(ubfm, 0b1101001101, 1);
|
||||
|
||||
#undef INSN
|
||||
|
||||
// Extract
|
||||
#define INSN(NAME, opcode) \
|
||||
#define INSN(NAME, opcode, size) \
|
||||
void NAME(Register Rd, Register Rn, Register Rm, unsigned imms) { \
|
||||
starti; \
|
||||
guarantee(size == 1 || imms < 32, "incorrect imms"); \
|
||||
f(opcode, 31, 21), f(imms, 15, 10); \
|
||||
rf(Rm, 16), rf(Rn, 5), rf(Rd, 0); \
|
||||
zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
|
||||
}
|
||||
|
||||
INSN(extrw, 0b00010011100);
|
||||
INSN(extr, 0b10010011110);
|
||||
INSN(extrw, 0b00010011100, 0);
|
||||
INSN(extr, 0b10010011110, 1);
|
||||
|
||||
#undef INSN
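A hedged usage sketch, not part of the patch, of what the new size argument to the bitfield and extract macros above buys: the 32-bit w-forms now reject immediates that only fit the 64-bit encodings. Written in the usual __ masm shorthand with arbitrary registers:

    __ ubfm (r2, r3, 33, 5);    // 64-bit form: immr/imms up to 63 remain legal
    __ ubfmw(r2, r3, 13, 5);    // 32-bit form: fine, both immediates are below 32
    // __ ubfmw(r2, r3, 33, 5); // would now fail the guarantee "incorrect immr/imms"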
@ -1126,7 +1130,7 @@ public:
|
||||
Register Rn, enum operand_size sz, int op, bool ordered) {
|
||||
starti;
|
||||
f(sz, 31, 30), f(0b001000, 29, 24), f(op, 23, 21);
|
||||
rf(Rs, 16), f(ordered, 15), rf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0);
|
||||
rf(Rs, 16), f(ordered, 15), zrf(Rt2, 10), srf(Rn, 5), zrf(Rt1, 0);
|
||||
}
|
||||
|
||||
void load_exclusive(Register dst, Register addr,
|
||||
@ -1255,7 +1259,7 @@ public:
|
||||
enum operand_size sz, int op1, int op2, bool a, bool r) {
|
||||
starti;
|
||||
f(sz, 31, 30), f(0b111000, 29, 24), f(a, 23), f(r, 22), f(1, 21);
|
||||
rf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
|
||||
zrf(Rs, 16), f(op1, 15), f(op2, 14, 12), f(0, 11, 10), srf(Rn, 5), zrf(Rt, 0);
|
||||
}
|
||||
|
||||
#define INSN(NAME, NAME_A, NAME_L, NAME_AL, op1, op2) \
|
||||
@ -1477,6 +1481,7 @@ public:
|
||||
void NAME(Register Rd, Register Rn, Register Rm, \
|
||||
enum shift_kind kind = LSL, unsigned shift = 0) { \
|
||||
starti; \
|
||||
guarantee(size == 1 || shift < 32, "incorrect shift"); \
|
||||
f(N, 21); \
|
||||
zrf(Rm, 16), zrf(Rn, 5), zrf(Rd, 0); \
|
||||
op_shifted_reg(0b01010, kind, shift, size, op); \
|
||||
@ -1539,6 +1544,7 @@ void mvnw(Register Rd, Register Rm,
|
||||
starti; \
|
||||
f(0, 21); \
|
||||
assert_cond(kind != ROR); \
|
||||
guarantee(size == 1 || shift < 32, "incorrect shift");\
|
||||
zrf(Rd, 0), zrf(Rn, 5), zrf(Rm, 16); \
|
||||
op_shifted_reg(0b01011, kind, shift, size, op); \
|
||||
}
|
||||
@ -1567,7 +1573,7 @@ void mvnw(Register Rd, Register Rm,
|
||||
void add_sub_extended_reg(unsigned op, unsigned decode,
|
||||
Register Rd, Register Rn, Register Rm,
|
||||
unsigned opt, ext::operation option, unsigned imm) {
|
||||
guarantee(imm <= 4, "shift amount must be < 4");
|
||||
guarantee(imm <= 4, "shift amount must be <= 4");
|
||||
f(op, 31, 29), f(decode, 28, 24), f(opt, 23, 22), f(1, 21);
|
||||
f(option, 15, 13), f(imm, 12, 10);
|
||||
}
|
||||
@ -1652,7 +1658,7 @@ void mvnw(Register Rd, Register Rm,
|
||||
f(o2, 10);
|
||||
f(o3, 4);
|
||||
f(nzcv, 3, 0);
|
||||
f(imm5, 20, 16), rf(Rn, 5);
|
||||
f(imm5, 20, 16), zrf(Rn, 5);
|
||||
}
|
||||
|
||||
#define INSN(NAME, op) \
|
||||
@ -2121,7 +2127,12 @@ public:
|
||||
}
|
||||
void ld_st(FloatRegister Vt, SIMD_Arrangement T, Register Xn,
|
||||
int imm, int op1, int op2, int regs) {
|
||||
guarantee(T <= T1Q && imm == SIMD_Size_in_bytes[T] * regs, "bad offset");
|
||||
|
||||
bool replicate = op2 >> 2 == 3;
|
||||
// post-index value (imm) is formed differently for replicate/non-replicate ld* instructions
|
||||
int expectedImmediate = replicate ? regs * (1 << (T >> 1)) : SIMD_Size_in_bytes[T] * regs;
|
||||
guarantee(T < T1Q , "incorrect arrangement");
|
||||
guarantee(imm == expectedImmediate, "bad offset");
|
||||
starti;
|
||||
f(0,31), f((int)T & 1, 30);
|
||||
f(op1 | 0b100, 29, 21), f(0b11111, 20, 16), f(op2, 15, 12);
|
||||
@ -2228,42 +2239,47 @@ public:
|
||||
|
||||
#undef INSN
|
||||
|
||||
#define INSN(NAME, opc, opc2) \
|
||||
#define INSN(NAME, opc, opc2, acceptT2D) \
|
||||
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
|
||||
guarantee(T != T1Q && T != T1D, "incorrect arrangement"); \
|
||||
if (!acceptT2D) guarantee(T != T2D, "incorrect arrangement"); \
|
||||
starti; \
|
||||
f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \
|
||||
f((int)T >> 1, 23, 22), f(1, 21), rf(Vm, 16), f(opc2, 15, 10); \
|
||||
rf(Vn, 5), rf(Vd, 0); \
|
||||
}
|
||||
|
||||
INSN(addv, 0, 0b100001);
|
||||
INSN(subv, 1, 0b100001);
|
||||
INSN(mulv, 0, 0b100111);
|
||||
INSN(mlav, 0, 0b100101);
|
||||
INSN(mlsv, 1, 0b100101);
|
||||
INSN(sshl, 0, 0b010001);
|
||||
INSN(ushl, 1, 0b010001);
|
||||
INSN(umullv, 1, 0b110000);
|
||||
INSN(umlalv, 1, 0b100000);
|
||||
INSN(addv, 0, 0b100001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
|
||||
INSN(subv, 1, 0b100001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
|
||||
INSN(mulv, 0, 0b100111, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
|
||||
INSN(mlav, 0, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
|
||||
INSN(mlsv, 1, 0b100101, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
|
||||
INSN(sshl, 0, 0b010001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
|
||||
INSN(ushl, 1, 0b010001, true); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
|
||||
INSN(umullv, 1, 0b110000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
|
||||
INSN(umlalv, 1, 0b100000, false); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S
|
||||
|
||||
#undef INSN
|
||||
|
||||
#define INSN(NAME, opc, opc2) \
|
||||
#define INSN(NAME, opc, opc2, accepted) \
|
||||
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) { \
|
||||
guarantee(T != T1Q && T != T1D, "incorrect arrangement"); \
|
||||
if (accepted < 2) guarantee(T != T2S && T != T2D, "incorrect arrangement"); \
|
||||
if (accepted == 0) guarantee(T == T8B || T == T16B, "incorrect arrangement"); \
|
||||
starti; \
|
||||
f(0, 31), f((int)T & 1, 30), f(opc, 29), f(0b01110, 28, 24); \
|
||||
f((int)T >> 1, 23, 22), f(opc2, 21, 10); \
|
||||
rf(Vn, 5), rf(Vd, 0); \
|
||||
}
|
||||
|
||||
INSN(absr, 0, 0b100000101110);
|
||||
INSN(negr, 1, 0b100000101110);
|
||||
INSN(notr, 1, 0b100000010110);
|
||||
INSN(addv, 0, 0b110001101110);
|
||||
INSN(cls, 0, 0b100000010010);
|
||||
INSN(clz, 1, 0b100000010010);
|
||||
INSN(cnt, 0, 0b100000010110);
|
||||
INSN(uaddlv, 1, 0b110000001110);
|
||||
INSN(absr, 0, 0b100000101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
|
||||
INSN(negr, 1, 0b100000101110, 2); // accepted arrangements: T8B, T16B, T4H, T8H, T2S, T4S, T2D
|
||||
INSN(notr, 1, 0b100000010110, 0); // accepted arrangements: T8B, T16B
|
||||
INSN(addv, 0, 0b110001101110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
|
||||
INSN(cls, 0, 0b100000010010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
|
||||
INSN(clz, 1, 0b100000010010, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
|
||||
INSN(cnt, 0, 0b100000010110, 0); // accepted arrangements: T8B, T16B
|
||||
INSN(uaddlv, 1, 0b110000001110, 1); // accepted arrangements: T8B, T16B, T4H, T8H, T4S
|
||||
|
||||
#undef INSN
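A short key to the new trailing argument in the two SIMD blocks above, as a hedged reading of the added guarantees: for the three-register forms it is a boolean that only decides whether T2D is admitted, while for the one-register forms it is a level (0, 1 or 2) that widens the set of legal arrangements. Hypothetical calls in the usual __ masm shorthand:

    __ addv(v0, __ T2D, v1, v2);    // ok: addv is declared with acceptT2D = true
    __ negr(v0, __ T2D, v1);        // ok: negr is declared with accepted = 2
    // __ mulv(v0, __ T2D, v1, v2); // rejected: mulv passes acceptT2D = false
    // __ notr(v0, __ T4H, v1);     // rejected: notr is level 0 (T8B/T16B only)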
@ -2287,7 +2303,7 @@ public:
|
||||
starti; \
|
||||
assert(lsl == 0 || \
|
||||
((T == T4H || T == T8H) && lsl == 8) || \
|
||||
((T == T2S || T == T4S) && ((lsl >> 3) < 4)), "invalid shift"); \
|
||||
((T == T2S || T == T4S) && ((lsl >> 3) < 4) && ((lsl & 7) == 0)), "invalid shift");\
|
||||
cmode |= lsl >> 2; \
|
||||
if (T == T4H || T == T8H) cmode |= 0b1000; \
|
||||
if (!(T == T4H || T == T8H || T == T2S || T == T4S)) { \
|
||||
@ -2448,7 +2464,8 @@ public:
|
||||
|
||||
#undef INSN
|
||||
|
||||
void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
|
||||
private:
|
||||
void _ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
|
||||
starti;
|
||||
/* The encodings for the immh:immb fields (bits 22:16) are
|
||||
* 0001 xxx 8H, 8B/16b shift = xxx
|
||||
@ -2461,8 +2478,16 @@ public:
|
||||
f(0, 31), f(Tb & 1, 30), f(0b1011110, 29, 23), f((1 << ((Tb>>1)+3))|shift, 22, 16);
|
||||
f(0b101001, 15, 10), rf(Vn, 5), rf(Vd, 0);
|
||||
}
|
||||
|
||||
public:
|
||||
void ushll(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
|
||||
assert(Tb == T8B || Tb == T4H || Tb == T2S, "invalid arrangement");
|
||||
_ushll(Vd, Ta, Vn, Tb, shift);
|
||||
}
|
||||
|
||||
void ushll2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, SIMD_Arrangement Tb, int shift) {
|
||||
ushll(Vd, Ta, Vn, Tb, shift);
|
||||
assert(Tb == T16B || Tb == T8H || Tb == T4S, "invalid arrangement");
|
||||
_ushll(Vd, Ta, Vn, Tb, shift);
|
||||
}
|
||||
|
||||
// Move from general purpose register
|
||||
@ -2470,19 +2495,21 @@ public:
|
||||
void mov(FloatRegister Vd, SIMD_Arrangement T, int index, Register Xn) {
|
||||
starti;
|
||||
f(0b01001110000, 31, 21), f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
|
||||
f(0b000111, 15, 10), rf(Xn, 5), rf(Vd, 0);
|
||||
f(0b000111, 15, 10), zrf(Xn, 5), rf(Vd, 0);
|
||||
}
|
||||
|
||||
// Move to general purpose register
|
||||
// mov Rd, Vn.T[index]
|
||||
void mov(Register Xd, FloatRegister Vn, SIMD_Arrangement T, int index) {
|
||||
guarantee(T >= T2S && T < T1Q, "only D and S arrangements are supported");
|
||||
starti;
|
||||
f(0, 31), f((T >= T1D) ? 1:0, 30), f(0b001110000, 29, 21);
|
||||
f(((1 << (T >> 1)) | (index << ((T >> 1) + 1))), 20, 16);
|
||||
f(0b001111, 15, 10), rf(Vn, 5), rf(Xd, 0);
|
||||
}
|
||||
|
||||
void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
|
||||
private:
|
||||
void _pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
|
||||
starti;
|
||||
assert((Ta == T1Q && (Tb == T1D || Tb == T2D)) ||
|
||||
(Ta == T8H && (Tb == T8B || Tb == T16B)), "Invalid Size specifier");
|
||||
@ -2490,9 +2517,16 @@ public:
|
||||
f(0, 31), f(Tb & 1, 30), f(0b001110, 29, 24), f(size, 23, 22);
|
||||
f(1, 21), rf(Vm, 16), f(0b111000, 15, 10), rf(Vn, 5), rf(Vd, 0);
|
||||
}
|
||||
|
||||
public:
|
||||
void pmull(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
|
||||
assert(Tb == T1D || Tb == T8B, "pmull assumes T1D or T8B as the second size specifier");
|
||||
_pmull(Vd, Ta, Vn, Vm, Tb);
|
||||
}
|
||||
|
||||
void pmull2(FloatRegister Vd, SIMD_Arrangement Ta, FloatRegister Vn, FloatRegister Vm, SIMD_Arrangement Tb) {
|
||||
assert(Tb == T2D || Tb == T16B, "pmull2 assumes T2D or T16B as the second size specifier");
|
||||
pmull(Vd, Ta, Vn, Vm, Tb);
|
||||
_pmull(Vd, Ta, Vn, Vm, Tb);
|
||||
}
|
||||
|
||||
void uqxtn(FloatRegister Vd, SIMD_Arrangement Tb, FloatRegister Vn, SIMD_Arrangement Ta) {
|
||||
@ -2509,7 +2543,7 @@ public:
|
||||
starti;
|
||||
assert(T != T1D, "reserved encoding");
|
||||
f(0,31), f((int)T & 1, 30), f(0b001110000, 29, 21);
|
||||
f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), rf(Xs, 5), rf(Vd, 0);
|
||||
f((1 << (T >> 1)), 20, 16), f(0b000011, 15, 10), zrf(Xs, 5), rf(Vd, 0);
|
||||
}
|
||||
|
||||
void dup(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, int index = 0)
|
||||
@ -2524,6 +2558,7 @@ public:
|
||||
// AdvSIMD ZIP/UZP/TRN
|
||||
#define INSN(NAME, opcode) \
|
||||
void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn, FloatRegister Vm) { \
|
||||
guarantee(T != T1D && T != T1Q, "invalid arrangement"); \
|
||||
starti; \
|
||||
f(0, 31), f(0b001110, 29, 24), f(0, 21), f(0, 15); \
|
||||
f(opcode, 14, 12), f(0b10, 11, 10); \
|
||||
|
||||
@ -136,7 +136,7 @@ class FloatRegisterImpl: public AbstractRegisterImpl {
|
||||
VMReg as_VMReg();
|
||||
|
||||
// derived registers, offsets, and addresses
|
||||
FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
|
||||
FloatRegister successor() const { return as_FloatRegister((encoding() + 1) % 32); }
|
||||
|
||||
// accessors
|
||||
int encoding() const { assert(is_valid(), "invalid register"); return (intptr_t)this; }
|
||||
|
||||
@ -80,7 +80,19 @@ int LIR_Assembler::check_icache() {
|
||||
}
|
||||
|
||||
void LIR_Assembler::clinit_barrier(ciMethod* method) {
|
||||
ShouldNotReachHere(); // not implemented
|
||||
assert(!method->holder()->is_not_initialized(), "initialization should have been started");
|
||||
|
||||
Label L_skip_barrier;
|
||||
Register klass = R20;
|
||||
|
||||
metadata2reg(method->holder()->constant_encoding(), klass);
|
||||
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
|
||||
|
||||
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
|
||||
__ mtctr(klass);
|
||||
__ bctr();
|
||||
|
||||
__ bind(L_skip_barrier);
|
||||
}
|
||||
|
||||
void LIR_Assembler::osr_entry() {
|
||||
|
||||
@ -82,6 +82,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
|
||||
// load cpool->resolved_klass_at(index)
|
||||
void load_resolved_klass_at_offset(Register Rcpool, Register Roffset, Register Rklass);
|
||||
|
||||
void load_resolved_method_at_index(int byte_no, Register cache, Register method);
|
||||
|
||||
void load_receiver(Register Rparam_count, Register Rrecv_dst);
|
||||
|
||||
// helpers for expression stack
|
||||
|
||||
@ -516,6 +516,18 @@ void InterpreterMacroAssembler::load_resolved_klass_at_offset(Register Rcpool, R
|
||||
ldx(Rklass, Rklass, Roffset);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::load_resolved_method_at_index(int byte_no,
|
||||
Register cache,
|
||||
Register method) {
|
||||
const int method_offset = in_bytes(
|
||||
ConstantPoolCache::base_offset() +
|
||||
((byte_no == TemplateTable::f2_byte)
|
||||
? ConstantPoolCacheEntry::f2_offset()
|
||||
: ConstantPoolCacheEntry::f1_offset()));
|
||||
|
||||
ld(method, method_offset, cache); // get f1 Method*
|
||||
}
|
||||
|
||||
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
|
||||
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
|
||||
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
|
||||
|
||||
@ -2011,6 +2011,35 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
|
||||
bind(L_failure); // Fallthru if not successful.
|
||||
}
|
||||
|
||||
void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
|
||||
assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
|
||||
|
||||
Label L_fallthrough;
|
||||
if (L_fast_path == NULL) {
|
||||
L_fast_path = &L_fallthrough;
|
||||
} else if (L_slow_path == NULL) {
|
||||
L_slow_path = &L_fallthrough;
|
||||
}
|
||||
|
||||
// Fast path check: class is fully initialized
|
||||
lbz(R0, in_bytes(InstanceKlass::init_state_offset()), klass);
|
||||
cmpwi(CCR0, R0, InstanceKlass::fully_initialized);
|
||||
beq(CCR0, *L_fast_path);
|
||||
|
||||
// Fast path check: current thread is initializer thread
|
||||
ld(R0, in_bytes(InstanceKlass::init_thread_offset()), klass);
|
||||
cmpd(CCR0, thread, R0);
|
||||
if (L_slow_path == &L_fallthrough) {
|
||||
beq(CCR0, *L_fast_path);
|
||||
} else if (L_fast_path == &L_fallthrough) {
|
||||
bne(CCR0, *L_slow_path);
|
||||
} else {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
bind(L_fallthrough);
|
||||
}
|
||||
|
||||
void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
|
||||
Register temp_reg,
|
||||
Label& wrong_method_type) {
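For reference, the call pattern that the new MacroAssembler::clinit_barrier above is designed for; this mirrors the call sites added later in this patch (the C1 LIR assembler, the ppc.ad prolog, the c2i adapter and the native wrapper) rather than introducing anything new:

    Label L_skip_barrier;
    Register klass = R11_scratch1;
    __ load_method_holder(klass, R19_method);                // holder of the static callee
    __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
    // slow path: class not yet fully initialized by this thread; re-dispatch
    // through the handle_wrong_method stub, which resolves and can block
    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
    __ mtctr(klass);
    __ bctr();
    __ bind(L_skip_barrier);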
@ -3194,6 +3223,12 @@ void MacroAssembler::load_mirror_from_const_method(Register mirror, Register con
|
||||
resolve_oop_handle(mirror);
|
||||
}
|
||||
|
||||
void MacroAssembler::load_method_holder(Register holder, Register method) {
|
||||
ld(holder, in_bytes(Method::const_offset()), method);
|
||||
ld(holder, in_bytes(ConstMethod::constants_offset()), holder);
|
||||
ld(holder, ConstantPool::pool_holder_offset_in_bytes(), holder);
|
||||
}
|
||||
|
||||
// Clear Array
|
||||
// For very short arrays. tmp == R0 is allowed.
|
||||
void MacroAssembler::clear_memory_unrolled(Register base_ptr, int cnt_dwords, Register tmp, int offset) {
|
||||
|
||||
@ -559,6 +559,11 @@ class MacroAssembler: public Assembler {
|
||||
Register temp2_reg,
|
||||
Label& L_success);
|
||||
|
||||
void clinit_barrier(Register klass,
|
||||
Register thread,
|
||||
Label* L_fast_path = NULL,
|
||||
Label* L_slow_path = NULL);
|
||||
|
||||
// Method handle support (JSR 292).
|
||||
void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);
|
||||
|
||||
@ -722,6 +727,7 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
void resolve_oop_handle(Register result);
|
||||
void load_mirror_from_const_method(Register mirror, Register const_method);
|
||||
void load_method_holder(Register holder, Register method);
|
||||
|
||||
static int instr_size_for_decode_klass_not_null();
|
||||
void decode_klass_not_null(Register dst, Register src = noreg);
|
||||
|
||||
@ -1400,6 +1400,24 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
|
||||
___(mflr) mflr(return_pc);
|
||||
}
|
||||
|
||||
if (C->clinit_barrier_on_entry()) {
|
||||
assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
|
||||
|
||||
Label L_skip_barrier;
|
||||
Register klass = toc_temp;
|
||||
|
||||
// Notify OOP recorder (don't need the relocation)
|
||||
AddressLiteral md = __ constant_metadata_address(C->method()->holder()->constant_encoding());
|
||||
__ load_const_optimized(klass, md.value(), R0);
|
||||
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
|
||||
|
||||
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
|
||||
__ mtctr(klass);
|
||||
__ bctr();
|
||||
|
||||
__ bind(L_skip_barrier);
|
||||
}
|
||||
|
||||
// Calls to C2R adapters often do not accept exceptional returns.
|
||||
// We require that their callers must bang for them. But be
|
||||
// careful, because some VM calls (such as call site linkage) can
|
||||
|
||||
@ -1274,7 +1274,30 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
|
||||
|
||||
// entry: c2i
|
||||
|
||||
c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
|
||||
c2i_entry = __ pc();
|
||||
|
||||
// Class initialization barrier for static methods
|
||||
if (VM_Version::supports_fast_class_init_checks()) {
|
||||
Label L_skip_barrier;
|
||||
|
||||
{ // Bypass the barrier for non-static methods
|
||||
__ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
|
||||
__ andi_(R0, R0, JVM_ACC_STATIC);
|
||||
__ beq(CCR0, L_skip_barrier); // non-static
|
||||
}
|
||||
|
||||
Register klass = R11_scratch1;
|
||||
__ load_method_holder(klass, R19_method);
|
||||
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
|
||||
|
||||
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
|
||||
__ mtctr(klass);
|
||||
__ bctr();
|
||||
|
||||
__ bind(L_skip_barrier);
|
||||
}
|
||||
|
||||
gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
|
||||
|
||||
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
|
||||
}
|
||||
@ -2106,6 +2129,21 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
__ tabort_();
|
||||
}
|
||||
|
||||
if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
|
||||
Label L_skip_barrier;
|
||||
Register klass = r_temp_1;
|
||||
// Notify OOP recorder (don't need the relocation)
|
||||
AddressLiteral md = __ constant_metadata_address(method->method_holder());
|
||||
__ load_const_optimized(klass, md.value(), R0);
|
||||
__ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);
|
||||
|
||||
__ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
|
||||
__ mtctr(klass);
|
||||
__ bctr();
|
||||
|
||||
__ bind(L_skip_barrier);
|
||||
}
|
||||
|
||||
__ save_LR_CR(r_temp_1);
|
||||
__ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
|
||||
__ mr(r_callers_sp, R1_SP); // Remember frame pointer.
|
||||
|
||||
@ -2232,7 +2232,7 @@ void TemplateTable::_return(TosState state) {
|
||||
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
|
||||
|
||||
__ get_cache_and_index_at_bcp(Rcache, 1, index_size);
|
||||
Label Lresolved, Ldone;
|
||||
Label Lresolved, Ldone, L_clinit_barrier_slow;
|
||||
|
||||
Bytecodes::Code code = bytecode();
|
||||
switch (code) {
|
||||
@ -2253,6 +2253,9 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
|
||||
__ cmpdi(CCR0, Rscratch, (int)code);
|
||||
__ beq(CCR0, Lresolved);
|
||||
|
||||
// Class initialization barrier slow path lands here as well.
|
||||
__ bind(L_clinit_barrier_slow);
|
||||
|
||||
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
|
||||
__ li(R4_ARG2, code);
|
||||
__ call_VM(noreg, entry, R4_ARG2, true);
|
||||
@ -2263,6 +2266,17 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
|
||||
|
||||
__ bind(Lresolved);
|
||||
__ isync(); // Order load wrt. succeeding loads.
|
||||
|
||||
// Class initialization barrier for static methods
|
||||
if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
|
||||
const Register method = Rscratch;
|
||||
const Register klass = Rscratch;
|
||||
|
||||
__ load_resolved_method_at_index(byte_no, Rcache, method);
|
||||
__ load_method_holder(klass, method);
|
||||
__ clinit_barrier(klass, R16_thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
|
||||
}
|
||||
|
||||
__ bind(Ldone);
|
||||
}
|
||||
|
||||
@ -2329,7 +2343,7 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
|
||||
// Already resolved.
|
||||
__ get_cache_and_index_at_bcp(Rcache, 1);
|
||||
} else {
|
||||
resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
|
||||
resolve_cache_and_index(byte_no, Rcache, /* temp */ Rmethod, is_invokedynamic ? sizeof(u4) : sizeof(u2));
|
||||
}
|
||||
|
||||
__ ld(Rmethod, method_offset, Rcache);
|
||||
@ -3634,9 +3648,7 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
// Find entry point to call.
|
||||
|
||||
// Get declaring interface class from method
|
||||
__ ld(Rinterface_klass, in_bytes(Method::const_offset()), Rmethod);
|
||||
__ ld(Rinterface_klass, in_bytes(ConstMethod::constants_offset()), Rinterface_klass);
|
||||
__ ld(Rinterface_klass, ConstantPool::pool_holder_offset_in_bytes(), Rinterface_klass);
|
||||
__ load_method_holder(Rinterface_klass, Rmethod);
|
||||
|
||||
// Get itable index from method
|
||||
__ lwa(Rindex, in_bytes(Method::itable_index_offset()), Rmethod);
|
||||
|
||||
@ -95,6 +95,9 @@ public:
|
||||
// Override Abstract_VM_Version implementation
|
||||
static bool use_biased_locking();
|
||||
|
||||
// PPC64 supports fast class initialization checks for static methods.
|
||||
static bool supports_fast_class_init_checks() { return true; }
|
||||
|
||||
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
|
||||
// CPU instruction support
|
||||
static bool has_fsqrt() { return (_features & fsqrt_m) != 0; }
|
||||
|
||||
@ -21,6 +21,12 @@
|
||||
// questions.
|
||||
//
|
||||
|
||||
source_hpp %{
|
||||
|
||||
#include "gc/z/c2/zBarrierSetC2.hpp"
|
||||
|
||||
%}
|
||||
|
||||
source %{
|
||||
|
||||
#include "gc/z/zBarrierSetAssembler.hpp"
|
||||
@ -45,7 +51,7 @@ instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate(UseAVX <= 2);
|
||||
predicate((UseAVX <= 2) && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -74,7 +80,7 @@ instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate(UseAVX == 3);
|
||||
predicate((UseAVX == 3) && !n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -102,8 +108,8 @@ instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
|
||||
rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
|
||||
|
||||
match(Set dst (LoadBarrierWeakSlowReg src));
|
||||
predicate(UseAVX <= 2);
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate((UseAVX <= 2) && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -131,8 +137,8 @@ instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
|
||||
rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
|
||||
|
||||
match(Set dst (LoadBarrierWeakSlowReg src));
|
||||
predicate(UseAVX == 3);
|
||||
match(Set dst (LoadBarrierSlowReg src));
|
||||
predicate((UseAVX == 3) && n->as_LoadBarrierSlowReg()->is_weak());
|
||||
|
||||
effect(DEF dst, KILL cr,
|
||||
KILL x0, KILL x1, KILL x2, KILL x3,
|
||||
@ -152,3 +158,58 @@ instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
|
||||
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Specialized versions of compareAndExchangeP that adds a keepalive that is consumed
|
||||
// but doesn't affect output.
|
||||
|
||||
instruct z_compareAndExchangeP(
|
||||
memory mem_ptr,
|
||||
rax_RegP oldval, rRegP newval, rRegP keepalive,
|
||||
rFlagsReg cr) %{
|
||||
predicate(VM_Version::supports_cx8());
|
||||
match(Set oldval (ZCompareAndExchangeP (Binary mem_ptr keepalive) (Binary oldval newval)));
|
||||
effect(KILL cr);
|
||||
|
||||
format %{ "cmpxchgq $mem_ptr,$newval\t# "
|
||||
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t" %}
|
||||
opcode(0x0F, 0xB1);
|
||||
ins_encode(lock_prefix,
|
||||
REX_reg_mem_wide(newval, mem_ptr),
|
||||
OpcP, OpcS,
|
||||
reg_mem(newval, mem_ptr) // lock cmpxchg
|
||||
);
|
||||
ins_pipe( pipe_cmpxchg );
|
||||
%}
|
||||
|
||||
instruct z_compareAndSwapP(rRegI res,
|
||||
memory mem_ptr,
|
||||
rax_RegP oldval, rRegP newval, rRegP keepalive,
|
||||
rFlagsReg cr) %{
|
||||
predicate(VM_Version::supports_cx8());
|
||||
match(Set res (ZCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
|
||||
match(Set res (ZWeakCompareAndSwapP (Binary mem_ptr keepalive) (Binary oldval newval)));
|
||||
effect(KILL cr, KILL oldval);
|
||||
|
||||
format %{ "cmpxchgq $mem_ptr,$newval\t# "
|
||||
"If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
|
||||
"sete $res\n\t"
|
||||
"movzbl $res, $res" %}
|
||||
opcode(0x0F, 0xB1);
|
||||
ins_encode(lock_prefix,
|
||||
REX_reg_mem_wide(newval, mem_ptr),
|
||||
OpcP, OpcS,
|
||||
reg_mem(newval, mem_ptr),
|
||||
REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
|
||||
REX_reg_breg(res, res), // movzbl
|
||||
Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
|
||||
ins_pipe( pipe_cmpxchg );
|
||||
%}
|
||||
|
||||
instruct z_xchgP( memory mem, rRegP newval, rRegP keepalive) %{
|
||||
match(Set newval (ZGetAndSetP mem (Binary newval keepalive)));
|
||||
format %{ "XCHGQ $newval,[$mem]" %}
|
||||
ins_encode %{
|
||||
__ xchgq($newval$$Register, $mem$$Address);
|
||||
%}
|
||||
ins_pipe( pipe_cmpxchg );
|
||||
%}
|
||||
|
||||
@ -8358,7 +8358,7 @@ instruct vshift4B(vecS dst, vecS src, vecS shift, vecS tmp, rRegI scratch) %{
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
match(Set dst (URShiftVB src shift));
|
||||
effect(TEMP dst, TEMP tmp, TEMP scratch);
|
||||
effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
|
||||
format %{"vextendbw $tmp,$src\n\t"
|
||||
"vshiftw $tmp,$shift\n\t"
|
||||
"movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t"
|
||||
@ -8381,7 +8381,7 @@ instruct vshift8B(vecD dst, vecD src, vecS shift, vecD tmp, rRegI scratch) %{
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
match(Set dst (URShiftVB src shift));
|
||||
effect(TEMP dst, TEMP tmp, TEMP scratch);
|
||||
effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
|
||||
format %{"vextendbw $tmp,$src\n\t"
|
||||
"vshiftw $tmp,$shift\n\t"
|
||||
"movdqu $dst,[0x00ff00ff0x00ff00ff]\n\t"
|
||||
@ -8404,7 +8404,7 @@ instruct vshift16B(vecX dst, vecX src, vecS shift, vecX tmp1, vecX tmp2, rRegI s
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
match(Set dst (URShiftVB src shift));
|
||||
effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP scratch);
|
||||
effect(TEMP dst, USE src, USE shift, TEMP tmp1, TEMP tmp2, TEMP scratch);
|
||||
format %{"vextendbw $tmp1,$src\n\t"
|
||||
"vshiftw $tmp1,$shift\n\t"
|
||||
"pshufd $tmp2,$src\n\t"
|
||||
@ -8435,7 +8435,7 @@ instruct vshift16B_avx(vecX dst, vecX src, vecS shift, vecX tmp, rRegI scratch)
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
match(Set dst (URShiftVB src shift));
|
||||
effect(TEMP dst, TEMP tmp, TEMP scratch);
|
||||
effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
|
||||
format %{"vextendbw $tmp,$src\n\t"
|
||||
"vshiftw $tmp,$tmp,$shift\n\t"
|
||||
"vpand $tmp,$tmp,[0x00ff00ff0x00ff00ff]\n\t"
|
||||
@ -8459,7 +8459,7 @@ instruct vshift32B_avx(vecY dst, vecY src, vecS shift, vecY tmp, rRegI scratch)
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
match(Set dst (URShiftVB src shift));
|
||||
effect(TEMP dst, TEMP tmp, TEMP scratch);
|
||||
effect(TEMP dst, USE src, USE shift, TEMP tmp, TEMP scratch);
|
||||
format %{"vextracti128_high $tmp,$src\n\t"
|
||||
"vextendbw $tmp,$tmp\n\t"
|
||||
"vextendbw $dst,$src\n\t"
|
||||
@ -8491,7 +8491,7 @@ instruct vshift64B_avx(vecZ dst, vecZ src, vecS shift, vecZ tmp1, vecZ tmp2, rRe
|
||||
match(Set dst (LShiftVB src shift));
|
||||
match(Set dst (RShiftVB src shift));
|
||||
match(Set dst (URShiftVB src shift));
|
||||
effect(TEMP dst, TEMP tmp1, TEMP tmp2, TEMP scratch);
|
||||
effect(TEMP dst, USE src, USE shift, TEMP tmp1, TEMP tmp2, TEMP scratch);
|
||||
format %{"vextracti64x4 $tmp1,$src\n\t"
|
||||
"vextendbw $tmp1,$tmp1\n\t"
|
||||
"vextendbw $tmp2,$src\n\t"
|
||||
@ -8534,6 +8534,7 @@ instruct vshist2S(vecS dst, vecS src, vecS shift) %{
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
match(Set dst (URShiftVS src shift));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftw $dst,$src,$shift\t! shift packed2S" %}
|
||||
ins_encode %{
|
||||
int opcode = this->as_Mach()->ideal_Opcode();
|
||||
@ -8554,6 +8555,7 @@ instruct vshift4S(vecD dst, vecD src, vecS shift) %{
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
match(Set dst (URShiftVS src shift));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftw $dst,$src,$shift\t! shift packed4S" %}
|
||||
ins_encode %{
|
||||
int opcode = this->as_Mach()->ideal_Opcode();
|
||||
@ -8575,6 +8577,7 @@ instruct vshift8S(vecX dst, vecX src, vecS shift) %{
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
match(Set dst (URShiftVS src shift));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftw $dst,$src,$shift\t! shift packed8S" %}
|
||||
ins_encode %{
|
||||
int opcode = this->as_Mach()->ideal_Opcode();
|
||||
@ -8595,6 +8598,7 @@ instruct vshift16S(vecY dst, vecY src, vecS shift) %{
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
match(Set dst (URShiftVS src shift));
|
||||
effect(DEF dst, USE src, USE shift);
|
||||
format %{ "vshiftw $dst,$src,$shift\t! shift packed16S" %}
|
||||
ins_encode %{
|
||||
int vector_len = 1;
|
||||
@ -8609,6 +8613,7 @@ instruct vshift32S(vecZ dst, vecZ src, vecS shift) %{
|
||||
match(Set dst (LShiftVS src shift));
|
||||
match(Set dst (RShiftVS src shift));
|
||||
match(Set dst (URShiftVS src shift));
|
||||
effect(DEF dst, USE src, USE shift);
|
||||
format %{ "vshiftw $dst,$src,$shift\t! shift packed32S" %}
|
||||
ins_encode %{
|
||||
int vector_len = 2;
|
||||
@ -8624,6 +8629,7 @@ instruct vshift2I(vecD dst, vecD src, vecS shift) %{
|
||||
match(Set dst (LShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
match(Set dst (URShiftVI src shift));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftd $dst,$src,$shift\t! shift packed2I" %}
|
||||
ins_encode %{
|
||||
int opcode = this->as_Mach()->ideal_Opcode();
|
||||
@ -8644,6 +8650,7 @@ instruct vshift4I(vecX dst, vecX src, vecS shift) %{
|
||||
match(Set dst (LShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
match(Set dst (URShiftVI src shift));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftd $dst,$src,$shift\t! shift packed4I" %}
|
||||
ins_encode %{
|
||||
int opcode = this->as_Mach()->ideal_Opcode();
|
||||
@ -8664,6 +8671,7 @@ instruct vshift8I(vecY dst, vecY src, vecS shift) %{
|
||||
match(Set dst (LShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
match(Set dst (URShiftVI src shift));
|
||||
effect(DEF dst, USE src, USE shift);
|
||||
format %{ "vshiftd $dst,$src,$shift\t! shift packed8I" %}
|
||||
ins_encode %{
|
||||
int vector_len = 1;
|
||||
@ -8678,6 +8686,7 @@ instruct vshift16I(vecZ dst, vecZ src, vecS shift) %{
|
||||
match(Set dst (LShiftVI src shift));
|
||||
match(Set dst (RShiftVI src shift));
|
||||
match(Set dst (URShiftVI src shift));
|
||||
effect(DEF dst, USE src, USE shift);
|
||||
format %{ "vshiftd $dst,$src,$shift\t! shift packed16I" %}
|
||||
ins_encode %{
|
||||
int vector_len = 2;
|
||||
@ -8692,6 +8701,7 @@ instruct vshift2L(vecX dst, vecX src, vecS shift) %{
|
||||
predicate(n->as_Vector()->length() == 2);
|
||||
match(Set dst (LShiftVL src shift));
|
||||
match(Set dst (URShiftVL src shift));
|
||||
effect(TEMP dst, USE src, USE shift);
|
||||
format %{ "vshiftq $dst,$src,$shift\t! shift packed2L" %}
|
||||
ins_encode %{
|
||||
int opcode = this->as_Mach()->ideal_Opcode();
|
||||
@ -8711,6 +8721,7 @@ instruct vshift4L(vecY dst, vecY src, vecS shift) %{
|
||||
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
||||
match(Set dst (LShiftVL src shift));
|
||||
match(Set dst (URShiftVL src shift));
|
||||
effect(DEF dst, USE src, USE shift);
|
||||
format %{ "vshiftq $dst,$src,$shift\t! left shift packed4L" %}
|
||||
ins_encode %{
|
||||
int vector_len = 1;
|
||||
@ -8725,6 +8736,7 @@ instruct vshift8L(vecZ dst, vecZ src, vecS shift) %{
|
||||
match(Set dst (LShiftVL src shift));
|
||||
match(Set dst (RShiftVL src shift));
|
||||
match(Set dst (URShiftVL src shift));
|
||||
effect(DEF dst, USE src, USE shift);
|
||||
format %{ "vshiftq $dst,$src,$shift\t! shift packed8L" %}
|
||||
ins_encode %{
|
||||
int vector_len = 2;
|
||||
|
||||
@ -2189,11 +2189,6 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int f
|
||||
// available (and not reserved for something else).
|
||||
|
||||
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
|
||||
const int max_tries = 10;
|
||||
char* base[max_tries];
|
||||
size_t size[max_tries];
|
||||
const size_t gap = 0x000000;
|
||||
|
||||
// Assert only that the size is a multiple of the page size, since
|
||||
// that's all that mmap requires, and since that's all we really know
|
||||
// about at this low abstraction level. If we need higher alignment,
|
||||
@ -2216,50 +2211,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
|
||||
anon_munmap(addr, bytes);
|
||||
}
|
||||
|
||||
int i;
|
||||
for (i = 0; i < max_tries; ++i) {
|
||||
base[i] = reserve_memory(bytes);
|
||||
|
||||
if (base[i] != NULL) {
|
||||
// Is this the block we wanted?
|
||||
if (base[i] == requested_addr) {
|
||||
size[i] = bytes;
|
||||
break;
|
||||
}
|
||||
|
||||
// Does this overlap the block we wanted? Give back the overlapped
|
||||
// parts and try again.
|
||||
|
||||
size_t top_overlap = requested_addr + (bytes + gap) - base[i];
|
||||
if (top_overlap >= 0 && top_overlap < bytes) {
|
||||
unmap_memory(base[i], top_overlap);
|
||||
base[i] += top_overlap;
|
||||
size[i] = bytes - top_overlap;
|
||||
} else {
|
||||
size_t bottom_overlap = base[i] + bytes - requested_addr;
|
||||
if (bottom_overlap >= 0 && bottom_overlap < bytes) {
|
||||
unmap_memory(requested_addr, bottom_overlap);
|
||||
size[i] = bytes - bottom_overlap;
|
||||
} else {
|
||||
size[i] = bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Give back the unused reserved pieces.
|
||||
|
||||
for (int j = 0; j < i; ++j) {
|
||||
if (base[j] != NULL) {
|
||||
unmap_memory(base[j], size[j]);
|
||||
}
|
||||
}
|
||||
|
||||
if (i < max_tries) {
|
||||
return requested_addr;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
|
||||
|
||||
@ -4105,11 +4105,6 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int f
|
||||
// available (and not reserved for something else).
|
||||
|
||||
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
|
||||
const int max_tries = 10;
|
||||
char* base[max_tries];
|
||||
size_t size[max_tries];
|
||||
const size_t gap = 0x000000;
|
||||
|
||||
// Assert only that the size is a multiple of the page size, since
|
||||
// that's all that mmap requires, and since that's all we really know
|
||||
// about at this low abstraction level. If we need higher alignment,
|
||||
@ -4132,50 +4127,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
|
||||
anon_munmap(addr, bytes);
|
||||
}
|
||||
|
||||
int i;
|
||||
for (i = 0; i < max_tries; ++i) {
|
||||
base[i] = reserve_memory(bytes);
|
||||
|
||||
if (base[i] != NULL) {
|
||||
// Is this the block we wanted?
|
||||
if (base[i] == requested_addr) {
|
||||
size[i] = bytes;
|
||||
break;
|
||||
}
|
||||
|
||||
// Does this overlap the block we wanted? Give back the overlapped
|
||||
// parts and try again.
|
||||
|
||||
ptrdiff_t top_overlap = requested_addr + (bytes + gap) - base[i];
|
||||
if (top_overlap >= 0 && (size_t)top_overlap < bytes) {
|
||||
unmap_memory(base[i], top_overlap);
|
||||
base[i] += top_overlap;
|
||||
size[i] = bytes - top_overlap;
|
||||
} else {
|
||||
ptrdiff_t bottom_overlap = base[i] + bytes - requested_addr;
|
||||
if (bottom_overlap >= 0 && (size_t)bottom_overlap < bytes) {
|
||||
unmap_memory(requested_addr, bottom_overlap);
|
||||
size[i] = bytes - bottom_overlap;
|
||||
} else {
|
||||
size[i] = bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Give back the unused reserved pieces.
|
||||
|
||||
for (int j = 0; j < i; ++j) {
|
||||
if (base[j] != NULL) {
|
||||
unmap_memory(base[j], size[j]);
|
||||
}
|
||||
}
|
||||
|
||||
if (i < max_tries) {
|
||||
return requested_addr;
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
|
||||
|
||||
@ -2553,17 +2553,6 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int f
|
||||
// available (and not reserved for something else).
|
||||
|
||||
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
|
||||
const int max_tries = 10;
|
||||
char* base[max_tries];
|
||||
size_t size[max_tries];
|
||||
|
||||
// Solaris adds a gap between mmap'ed regions. The size of the gap
|
||||
// is dependent on the requested size and the MMU. Our initial gap
|
||||
// value here is just a guess and will be corrected later.
|
||||
bool had_top_overlap = false;
|
||||
bool have_adjusted_gap = false;
|
||||
size_t gap = 0x400000;
|
||||
|
||||
// Assert only that the size is a multiple of the page size, since
|
||||
// that's all that mmap requires, and since that's all we really know
|
||||
// about at this low abstraction level. If we need higher alignment,
|
||||
@ -2572,105 +2561,18 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
|
||||
assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
|
||||
|
||||
// Since snv_84, Solaris attempts to honor the address hint - see 5003415.
|
||||
// Give it a try, if the kernel honors the hint we can return immediately.
|
||||
char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
|
||||
|
||||
volatile int err = errno;
|
||||
if (addr == requested_addr) {
|
||||
return addr;
|
||||
} else if (addr != NULL) {
|
||||
}
|
||||
|
||||
if (addr != NULL) {
|
||||
pd_unmap_memory(addr, bytes);
|
||||
}
|
||||
|
||||
if (log_is_enabled(Warning, os)) {
|
||||
char buf[256];
|
||||
buf[0] = '\0';
|
||||
if (addr == NULL) {
|
||||
jio_snprintf(buf, sizeof(buf), ": %s", os::strerror(err));
|
||||
}
|
||||
log_info(os)("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
|
||||
PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
|
||||
"%s", bytes, requested_addr, addr, buf);
|
||||
}
|
||||
|
||||
// Address hint method didn't work. Fall back to the old method.
|
||||
// In theory, once SNV becomes our oldest supported platform, this
|
||||
// code will no longer be needed.
|
||||
//
|
||||
// Repeatedly allocate blocks until the block is allocated at the
|
||||
// right spot. Give up after max_tries.
|
||||
int i;
|
||||
for (i = 0; i < max_tries; ++i) {
|
||||
base[i] = reserve_memory(bytes);
|
||||
|
||||
if (base[i] != NULL) {
|
||||
// Is this the block we wanted?
|
||||
if (base[i] == requested_addr) {
|
||||
size[i] = bytes;
|
||||
break;
|
||||
}
|
||||
|
||||
// check that the gap value is right
|
||||
if (had_top_overlap && !have_adjusted_gap) {
|
||||
size_t actual_gap = base[i-1] - base[i] - bytes;
|
||||
if (gap != actual_gap) {
|
||||
// adjust the gap value and retry the last 2 allocations
|
||||
assert(i > 0, "gap adjustment code problem");
|
||||
have_adjusted_gap = true; // adjust the gap only once, just in case
|
||||
gap = actual_gap;
|
||||
log_info(os)("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
|
||||
unmap_memory(base[i], bytes);
|
||||
unmap_memory(base[i-1], size[i-1]);
|
||||
i-=2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Does this overlap the block we wanted? Give back the overlapped
|
||||
// parts and try again.
|
||||
//
|
||||
// There is still a bug in this code: if top_overlap == bytes,
|
||||
// the overlap is offset from requested region by the value of gap.
|
||||
// In this case giving back the overlapped part will not work,
|
||||
// because we'll give back the entire block at base[i] and
|
||||
// therefore the subsequent allocation will not generate a new gap.
|
||||
// This could be fixed with a new algorithm that used larger
|
||||
// or variable size chunks to find the requested region -
|
||||
// but such a change would introduce additional complications.
|
||||
// It's rare enough that the planets align for this bug,
|
||||
// so we'll just wait for a fix for 6204603/5003415 which
|
||||
// will provide a mmap flag to allow us to avoid this business.
|
||||
|
||||
size_t top_overlap = requested_addr + (bytes + gap) - base[i];
|
||||
if (top_overlap >= 0 && top_overlap < bytes) {
|
||||
had_top_overlap = true;
|
||||
unmap_memory(base[i], top_overlap);
|
||||
base[i] += top_overlap;
|
||||
size[i] = bytes - top_overlap;
|
||||
} else {
|
||||
size_t bottom_overlap = base[i] + bytes - requested_addr;
|
||||
if (bottom_overlap >= 0 && bottom_overlap < bytes) {
|
||||
if (bottom_overlap == 0) {
|
||||
log_info(os)("attempt_reserve_memory_at: possible alignment bug");
|
||||
}
|
||||
unmap_memory(requested_addr, bottom_overlap);
|
||||
size[i] = bytes - bottom_overlap;
|
||||
} else {
|
||||
size[i] = bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Give back the unused reserved pieces.
|
||||
|
||||
for (int j = 0; j < i; ++j) {
|
||||
if (base[j] != NULL) {
|
||||
unmap_memory(base[j], size[j]);
|
||||
}
|
||||
}
|
||||
|
||||
return (i < max_tries) ? requested_addr : NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool os::pd_release_memory(char* addr, size_t bytes) {
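With the retry loop above removed, only the hint-based path earlier in the function survives; a hedged restatement of the remaining logic, using the names from the Solaris code above:

    // ask the kernel to honor the address hint once; undo the mapping otherwise
    char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
    if (addr == requested_addr) return addr;      // hint honored
    if (addr != NULL) pd_unmap_memory(addr, bytes);
    return NULL;                                  // caller falls back to another strategy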
@ -757,7 +757,6 @@ int InstructForm::memory_operand(FormDict &globals) const {
|
||||
return NO_MEMORY_OPERAND;
|
||||
}
|
||||
|
||||
|
||||
// This instruction captures the machine-independent bottom_type
|
||||
// Expected use is for pointer vs oop determination for LoadP
|
||||
bool InstructForm::captures_bottom_type(FormDict &globals) const {
|
||||
@ -775,8 +774,9 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
|
||||
!strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
|
||||
#if INCLUDE_ZGC
|
||||
!strcmp(_matrule->_rChild->_opType,"ZGetAndSetP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"ZCompareAndExchangeP") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"LoadBarrierSlowReg") ||
|
||||
!strcmp(_matrule->_rChild->_opType,"LoadBarrierWeakSlowReg") ||
|
||||
#endif
|
||||
#if INCLUDE_SHENANDOAHGC
|
||||
!strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") ||
|
||||
@ -3506,12 +3506,16 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
|
||||
"CompareAndSwapB", "CompareAndSwapS", "CompareAndSwapI", "CompareAndSwapL", "CompareAndSwapP", "CompareAndSwapN",
|
||||
"WeakCompareAndSwapB", "WeakCompareAndSwapS", "WeakCompareAndSwapI", "WeakCompareAndSwapL", "WeakCompareAndSwapP", "WeakCompareAndSwapN",
|
||||
"CompareAndExchangeB", "CompareAndExchangeS", "CompareAndExchangeI", "CompareAndExchangeL", "CompareAndExchangeP", "CompareAndExchangeN",
|
||||
#if INCLUDE_SHENANDOAHGC
|
||||
"ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", "ShenandoahWeakCompareAndSwapP", "ShenandoahWeakCompareAndSwapN", "ShenandoahCompareAndExchangeP", "ShenandoahCompareAndExchangeN",
|
||||
#endif
|
||||
"StoreCM",
|
||||
"ClearArray",
|
||||
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
|
||||
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
|
||||
"LoadBarrierSlowReg", "LoadBarrierWeakSlowReg"
|
||||
#if INCLUDE_ZGC
|
||||
"LoadBarrierSlowReg", "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
|
||||
#endif
|
||||
"ClearArray"
|
||||
};
|
||||
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
|
||||
if( strcmp(_opType,"PrefetchAllocation")==0 )
|
||||
|
||||
@ -51,6 +51,7 @@ private:
|
||||
int _scopes_begin;
|
||||
int _reloc_begin;
|
||||
int _exception_table_begin;
|
||||
int _nul_chk_table_begin;
|
||||
int _oopmap_begin;
|
||||
address at_offset(size_t offset) const { return ((address) this) + offset; }
|
||||
public:
|
||||
@ -63,9 +64,9 @@ public:
|
||||
relocInfo* relocation_begin() const { return (relocInfo*) at_offset(_reloc_begin); }
|
||||
relocInfo* relocation_end() const { return (relocInfo*) at_offset(_exception_table_begin); }
|
||||
address handler_table_begin () const { return at_offset(_exception_table_begin); }
|
||||
address handler_table_end() const { return at_offset(_oopmap_begin); }
|
||||
address handler_table_end() const { return at_offset(_nul_chk_table_begin); }
|
||||
|
||||
address nul_chk_table_begin() const { return at_offset(_oopmap_begin); }
|
||||
address nul_chk_table_begin() const { return at_offset(_nul_chk_table_begin); }
|
||||
address nul_chk_table_end() const { return at_offset(_oopmap_begin); }
|
||||
|
||||
ImmutableOopMapSet* oopmap_set() const { return (ImmutableOopMapSet*) at_offset(_oopmap_begin); }
|
||||
|
||||
@ -413,6 +413,7 @@ class CodeBuffer: public StackObj {
|
||||
|| PrintMethodHandleStubs
|
||||
|| PrintInterpreter
|
||||
|| PrintSignatureHandlers
|
||||
|| UnlockDiagnosticVMOptions
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -4754,60 +4754,62 @@ static bool has_illegal_visibility(jint flags) {

// A legal major_version.minor_version must be one of the following:
//
//   Major_version = 45, any minor_version.
//   Major_version >= 46 and major_version <= current_major_version and minor_version = 0.
//   Major_version = current_major_version and minor_version = 65535 and --enable-preview is present.
//   Major_version >= 45 and major_version < 56, any minor_version.
//   Major_version >= 56 and major_version <= JVM_CLASSFILE_MAJOR_VERSION and minor_version = 0.
//   Major_version = JVM_CLASSFILE_MAJOR_VERSION and minor_version = 65535 and --enable-preview is present.
//
static void verify_class_version(u2 major, u2 minor, Symbol* class_name, TRAPS){
ResourceMark rm(THREAD);
const u2 max_version = JVM_CLASSFILE_MAJOR_VERSION;
if (major != JAVA_MIN_SUPPORTED_VERSION) { // All 45.* are ok including 45.65535
if (minor == JAVA_PREVIEW_MINOR_VERSION) {
if (major != max_version) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s (class file version %u.%u) was compiled with preview features that are unsupported. "
"This version of the Java Runtime only recognizes preview features for class file version %u.%u",
class_name->as_C_string(), major, minor, JVM_CLASSFILE_MAJOR_VERSION, JAVA_PREVIEW_MINOR_VERSION);
return;
}
if (major < JAVA_MIN_SUPPORTED_VERSION) {
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s (class file version %u.%u) was compiled with an invalid major version",
class_name->as_C_string(), major, minor);
return;
}

if (!Arguments::enable_preview()) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"Preview features are not enabled for %s (class file version %u.%u). Try running with '--enable-preview'",
class_name->as_C_string(), major, minor);
return;
}
if (major > max_version) {
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
"this version of the Java Runtime only recognizes class file versions up to %u.0",
class_name->as_C_string(), major, minor, JVM_CLASSFILE_MAJOR_VERSION);
return;
}

} else { // minor != JAVA_PREVIEW_MINOR_VERSION
if (major > max_version) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
"this version of the Java Runtime only recognizes class file versions up to %u.0",
class_name->as_C_string(), major, minor, JVM_CLASSFILE_MAJOR_VERSION);
} else if (major < JAVA_MIN_SUPPORTED_VERSION) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s (class file version %u.%u) was compiled with an invalid major version",
class_name->as_C_string(), major, minor);
} else if (minor != 0) {
ResourceMark rm(THREAD);
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s (class file version %u.%u) was compiled with an invalid non-zero minor version",
class_name->as_C_string(), major, minor);
}
if (major < JAVA_12_VERSION || minor == 0) {
return;
}

if (minor == JAVA_PREVIEW_MINOR_VERSION) {
if (major != max_version) {
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s (class file version %u.%u) was compiled with preview features that are unsupported. "
"This version of the Java Runtime only recognizes preview features for class file version %u.%u",
class_name->as_C_string(), major, minor, JVM_CLASSFILE_MAJOR_VERSION, JAVA_PREVIEW_MINOR_VERSION);
return;
}

if (!Arguments::enable_preview()) {
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"Preview features are not enabled for %s (class file version %u.%u). Try running with '--enable-preview'",
class_name->as_C_string(), major, minor);
return;
}

} else { // minor != JAVA_PREVIEW_MINOR_VERSION
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"%s (class file version %u.%u) was compiled with an invalid non-zero minor version",
class_name->as_C_string(), major, minor);
}
}

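The accepted combinations in the comment block above reduce to a small predicate. Below is a minimal, self-contained sketch of those rules, not the VM code itself: the constants 45, 56 and 65535 stand in for JAVA_MIN_SUPPORTED_VERSION, JAVA_12_VERSION and JAVA_PREVIEW_MINOR_VERSION, and the current class file major version plus the --enable-preview state are passed in as plain parameters.

#include <cstdint>
#include <cstdio>

// Hypothetical standalone check mirroring the documented rules; it only
// answers accept/reject and does not build the error messages above.
static bool is_supported_class_file_version(uint16_t major, uint16_t minor,
                                            uint16_t current_major,
                                            bool preview_enabled) {
  const uint16_t min_major     = 45;    // JAVA_MIN_SUPPORTED_VERSION
  const uint16_t java_12_major = 56;    // JAVA_12_VERSION
  const uint16_t preview_minor = 65535; // JAVA_PREVIEW_MINOR_VERSION

  if (major >= min_major && major < java_12_major) {
    return true;  // 45 <= major < 56: any minor version, including 45.65535
  }
  if (major >= java_12_major && major <= current_major && minor == 0) {
    return true;  // 56 and newer: minor version must be 0
  }
  // Preview class files must match the current major version exactly and
  // are only accepted when --enable-preview is present.
  return major == current_major && minor == preview_minor && preview_enabled;
}

int main() {
  std::printf("%d\n", is_supported_class_file_version(55, 3, 58, false));     // 1
  std::printf("%d\n", is_supported_class_file_version(58, 65535, 58, true));  // 1
  std::printf("%d\n", is_supported_class_file_version(58, 65535, 58, false)); // 0
  return 0;
}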
@@ -5641,11 +5643,11 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik, bool changed_by_loa
}

if (ik->minor_version() == JAVA_PREVIEW_MINOR_VERSION &&
ik->major_version() != JAVA_MIN_SUPPORTED_VERSION &&
ik->major_version() == JVM_CLASSFILE_MAJOR_VERSION &&
log_is_enabled(Info, class, preview)) {
ResourceMark rm;
log_info(class, preview)("Loading class %s that depends on preview features (class file version %d.65535)",
ik->external_name(), ik->major_version());
ik->external_name(), JVM_CLASSFILE_MAJOR_VERSION);
}

if (log_is_enabled(Debug, class, resolve)) {

@@ -58,8 +58,8 @@
const double PREF_AVG_LIST_LEN = 2.0;
// 2^24 is max size
const size_t END_SIZE = 24;
// If a chain gets to 32 something might be wrong
const size_t REHASH_LEN = 32;
// If a chain gets to 100 something might be wrong
const size_t REHASH_LEN = 100;
// If we have as many dead items as 50% of the number of bucket
const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;

@@ -496,8 +496,9 @@ bool StringTable::do_rehash() {
return false;
}

// We use max size
StringTableHash* new_table = new StringTableHash(END_SIZE, END_SIZE, REHASH_LEN);
// We use current size, not max size.
size_t new_size = _local_table->get_size_log2(Thread::current());
StringTableHash* new_table = new StringTableHash(new_size, END_SIZE, REHASH_LEN);
// Use alt hash from now on
_alt_hash = true;
if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) {

@@ -267,7 +267,7 @@ void SymbolTable::symbols_do(SymbolClosure *cl) {
// all symbols from the dynamic table
SymbolsDo sd(cl);
if (!_local_table->try_scan(Thread::current(), sd)) {
log_info(stringtable)("symbols_do unavailable at this moment");
log_info(symboltable)("symbols_do unavailable at this moment");
}
}

@@ -557,7 +557,7 @@ void SymbolTable::verify() {
Thread* thr = Thread::current();
VerifySymbols vs;
if (!_local_table->try_scan(thr, vs)) {
log_info(stringtable)("verify unavailable at this moment");
log_info(symboltable)("verify unavailable at this moment");
}
}

@@ -763,8 +763,9 @@ bool SymbolTable::do_rehash() {
return false;
}

// We use max size
SymbolTableHash* new_table = new SymbolTableHash(END_SIZE, END_SIZE, REHASH_LEN);
// We use current size
size_t new_size = _local_table->get_size_log2(Thread::current());
SymbolTableHash* new_table = new SymbolTableHash(new_size, END_SIZE, REHASH_LEN);
// Use alt hash from now on
_alt_hash = true;
if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) {

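Both do_rehash() paths above now size the replacement table from the table's current size-log2 rather than from END_SIZE; the point of the rehash is to switch to the alternate hash seed, not to grow to the maximum size. A rough illustration of that idea with a toy chained table (this is not HotSpot's ConcurrentHashTable; all names are invented for the example):

#include <cstddef>
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// Toy table: a fixed number of buckets, chained entries, and a hash seed.
struct SeededTable {
  std::size_t seed;
  std::vector<std::vector<std::string>> buckets;
  SeededTable(std::size_t nbuckets, std::size_t s) : seed(s), buckets(nbuckets) {}
  void insert(const std::string& key) {
    std::size_t h = std::hash<std::string>{}(key) ^ seed;
    buckets[h % buckets.size()].push_back(key);
  }
};

// Rebuild with a new seed but the *current* bucket count, mirroring the
// "use current size, not max size" change in the hunks above.
static SeededTable rehash_with_new_seed(const SeededTable& old_table, std::size_t new_seed) {
  SeededTable fresh(old_table.buckets.size(), new_seed);
  for (const auto& chain : old_table.buckets) {
    for (const auto& key : chain) {
      fresh.insert(key);
    }
  }
  return fresh;
}

int main() {
  SeededTable table(16, 0);
  table.insert("java/lang/Object");
  table.insert("java/lang/String");
  SeededTable rehashed = rehash_with_new_seed(table, 0x5bd1e995);
  std::printf("buckets before/after: %zu/%zu\n", table.buckets.size(), rehashed.buckets.size());
  return 0;
}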
@@ -1284,7 +1284,7 @@ void CodeCache::report_codemem_full(int code_blob_type, bool print) {

if (heap->full_count() == 0) {
if (PrintCodeHeapAnalytics) {
CompileBroker::print_heapinfo(tty, "all", "4096"); // details, may be a lot!
CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot!
}
}
}
@@ -1571,7 +1571,7 @@ void CodeCache::log_state(outputStream* st) {

//---< BEGIN >--- CodeHeap State Analytics.

void CodeCache::aggregate(outputStream *out, const char* granularity) {
void CodeCache::aggregate(outputStream *out, size_t granularity) {
FOR_ALL_ALLOCABLE_HEAPS(heap) {
CodeHeapState::aggregate(out, (*heap), granularity);
}

@@ -294,7 +294,7 @@ class CodeCache : AllStatic {

// CodeHeap State Analytics.
// interface methods for CodeHeap printing, called by CompileBroker
static void aggregate(outputStream *out, const char* granularity);
static void aggregate(outputStream *out, size_t granularity);
static void discard(outputStream *out);
static void print_usedSpace(outputStream *out);
static void print_freeSpace(outputStream *out);

@@ -530,7 +530,7 @@ void CodeHeapState::discard(outputStream* out, CodeHeap* heap) {
}
}

void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* granularity_request) {
void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, size_t granularity) {
unsigned int nBlocks_free = 0;
unsigned int nBlocks_used = 0;
unsigned int nBlocks_zomb = 0;
@@ -612,7 +612,8 @@ void CodeHeapState::aggregate(outputStream* out, CodeHeap* heap, const char* gra
// Finally, we adjust the granularity such that each granule covers at most 64k-1 segments.
// This is necessary to prevent an unsigned short overflow while accumulating space information.
//
size_t granularity = strtol(granularity_request, NULL, 0);
assert(granularity > 0, "granularity should be positive.");

if (granularity > size) {
granularity = size;
}

@@ -99,7 +99,7 @@ class CodeHeapState : public CHeapObj<mtCode> {

public:
static void discard(outputStream* out, CodeHeap* heap);
static void aggregate(outputStream* out, CodeHeap* heap, const char* granularity);
static void aggregate(outputStream* out, CodeHeap* heap, size_t granularity);
static void print_usedSpace(outputStream* out, CodeHeap* heap);
static void print_freeSpace(outputStream* out, CodeHeap* heap);
static void print_count(outputStream* out, CodeHeap* heap);

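With aggregate() now taking a size_t, the textual granularity argument is parsed once by the caller instead of once per code heap, which is what the signature changes above are about. A hedged sketch of that split; the function names below are placeholders, not the real CompileBroker/CodeHeapState entry points:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Placeholder for the per-heap aggregation that now receives a number.
static void aggregate_one_heap(std::size_t granularity) {
  std::printf("aggregating one heap with granularity %zu\n", granularity);
}

// The caller parses and sanitizes the user-supplied string exactly once,
// mirroring the strtol() + "granularity should be positive" handling that
// used to sit inside the per-heap aggregate() above.
static void print_heapinfo(const char* granularity_request) {
  std::size_t granularity = std::strtoul(granularity_request, nullptr, 0);
  if (granularity == 0) {
    granularity = 4096;  // fall back to a default instead of asserting
  }
  for (int heap = 0; heap < 3; heap++) {  // stand-in for iterating all code heaps
    aggregate_one_heap(granularity);
  }
}

int main() {
  print_heapinfo("4096");
  return 0;
}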
@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "code/compiledIC.hpp"
|
||||
#include "code/compiledMethod.inline.hpp"
|
||||
#include "code/exceptionHandlerTable.hpp"
|
||||
#include "code/scopeDesc.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "code/icBuffer.hpp"
|
||||
@ -37,8 +38,10 @@
|
||||
#include "oops/methodData.hpp"
|
||||
#include "oops/method.inline.hpp"
|
||||
#include "prims/methodHandles.hpp"
|
||||
#include "runtime/deoptimization.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/sharedRuntime.hpp"
|
||||
|
||||
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
|
||||
int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
|
||||
@ -638,6 +641,46 @@ bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
|
||||
os::is_readable_pointer(method->signature());
|
||||
}
|
||||
|
||||
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
|
||||
// Exception happened outside inline-cache check code => we are inside
|
||||
// an active nmethod => use cpc to determine a return address
|
||||
int exception_offset = pc - code_begin();
|
||||
int cont_offset = ImplicitExceptionTable(this).continuation_offset( exception_offset );
|
||||
#ifdef ASSERT
|
||||
if (cont_offset == 0) {
|
||||
Thread* thread = Thread::current();
|
||||
ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
|
||||
HandleMark hm(thread);
|
||||
ResourceMark rm(thread);
|
||||
CodeBlob* cb = CodeCache::find_blob(pc);
|
||||
assert(cb != NULL && cb == this, "");
|
||||
ttyLocker ttyl;
|
||||
tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
|
||||
print();
|
||||
method()->print_codes();
|
||||
print_code();
|
||||
print_pcs();
|
||||
}
|
||||
#endif
|
||||
if (cont_offset == 0) {
|
||||
// Let the normal error handling report the exception
|
||||
return NULL;
|
||||
}
|
||||
if (cont_offset == exception_offset) {
|
||||
#if INCLUDE_JVMCI
|
||||
Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
|
||||
JavaThread *thread = JavaThread::current();
|
||||
thread->set_jvmci_implicit_exception_pc(pc);
|
||||
thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
|
||||
Deoptimization::Action_reinterpret));
|
||||
return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
|
||||
#else
|
||||
ShouldNotReachHere();
|
||||
#endif
|
||||
}
|
||||
return code_begin() + cont_offset;
|
||||
}
|
||||
|
||||
class HasEvolDependency : public MetadataClosure {
|
||||
bool _has_evol_dependency;
|
||||
public:
|
||||
|
||||
@ -350,7 +350,8 @@ public:
|
||||
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
|
||||
|
||||
// implicit exceptions support
|
||||
virtual address continuation_for_implicit_exception(address pc) { return NULL; }
|
||||
address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
|
||||
address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
|
||||
|
||||
static address get_deopt_original_pc(const frame* fr);
|
||||
|
||||
@ -358,6 +359,8 @@ public:
|
||||
private:
|
||||
bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);
|
||||
|
||||
address continuation_for_implicit_exception(address pc, bool for_div0_check);
|
||||
|
||||
public:
|
||||
// Serial version used by sweeper and whitebox test
|
||||
void cleanup_inline_caches(bool clean_all);
|
||||
|
||||
@ -373,8 +373,7 @@ class Dependencies: public ResourceObj {
|
||||
assert(ctxk->is_abstract(), "must be abstract");
|
||||
}
|
||||
static void check_unique_method(Klass* ctxk, Method* m) {
|
||||
// Graal can register redundant dependencies
|
||||
assert(UseJVMCICompiler || !m->can_be_statically_bound(InstanceKlass::cast(ctxk)), "redundant");
|
||||
assert(!m->can_be_statically_bound(InstanceKlass::cast(ctxk)), "redundant");
|
||||
}
|
||||
|
||||
void assert_common_1(DepType dept, DepValue x);
|
||||
|
||||
@ -176,7 +176,7 @@ void ImplicitExceptionTable::append( uint exec_off, uint cont_off ) {
|
||||
_len = l+1;
|
||||
};
|
||||
|
||||
uint ImplicitExceptionTable::at( uint exec_off ) const {
|
||||
uint ImplicitExceptionTable::continuation_offset( uint exec_off ) const {
|
||||
uint l = len();
|
||||
for( uint i=0; i<l; i++ )
|
||||
if( *adr(i) == exec_off )
|
||||
@ -205,7 +205,7 @@ void ImplicitExceptionTable::print(address base) const {
|
||||
}
|
||||
}
|
||||
|
||||
ImplicitExceptionTable::ImplicitExceptionTable(const nmethod* nm) {
|
||||
ImplicitExceptionTable::ImplicitExceptionTable(const CompiledMethod* nm) {
|
||||
if (nm->nul_chk_table_size() == 0) {
|
||||
_len = 0;
|
||||
_data = NULL;
|
||||
@ -221,9 +221,13 @@ ImplicitExceptionTable::ImplicitExceptionTable(const nmethod* nm) {
|
||||
}
|
||||
|
||||
void ImplicitExceptionTable::copy_to( nmethod* nm ) {
|
||||
assert(size_in_bytes() <= nm->nul_chk_table_size(), "size of space allocated in nmethod incorrect");
|
||||
copy_bytes_to(nm->nul_chk_table_begin(), nm->nul_chk_table_size());
|
||||
}
|
||||
|
||||
void ImplicitExceptionTable::copy_bytes_to(address addr, int size) {
|
||||
assert(size_in_bytes() <= size, "size of space allocated in nmethod incorrect");
|
||||
if (len() != 0) {
|
||||
implicit_null_entry* nmdata = (implicit_null_entry*)nm->nul_chk_table_begin();
|
||||
implicit_null_entry* nmdata = (implicit_null_entry*)addr;
|
||||
// store the length in the first uint
|
||||
nmdata[0] = _len;
|
||||
nmdata++;
|
||||
@ -232,7 +236,7 @@ void ImplicitExceptionTable::copy_to( nmethod* nm ) {
|
||||
} else {
|
||||
// zero length table takes zero bytes
|
||||
assert(size_in_bytes() == 0, "bad size");
|
||||
assert(nm->nul_chk_table_size() == 0, "bad size");
|
||||
assert(size == 0, "bad size");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -146,19 +146,36 @@ class ImplicitExceptionTable {
|
||||
implicit_null_entry *_data;
|
||||
implicit_null_entry *adr( uint idx ) const { return &_data[2*idx]; }
|
||||
ReallocMark _nesting; // assertion check for reallocations
|
||||
|
||||
public:
|
||||
ImplicitExceptionTable( ) : _size(0), _len(0), _data(0) { }
|
||||
// (run-time) construction from nmethod
|
||||
ImplicitExceptionTable( const nmethod *nm );
|
||||
ImplicitExceptionTable( const CompiledMethod *nm );
|
||||
|
||||
void set_size( uint size );
|
||||
void append( uint exec_off, uint cont_off );
|
||||
uint at( uint exec_off ) const;
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
void add_deoptimize(uint exec_off) {
|
||||
// Use the same offset as a marker value for deoptimization
|
||||
append(exec_off, exec_off);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Returns the offset to continue execution at. If the returned
|
||||
// value equals exec_off then the dispatch is expected to be a
|
||||
// deoptimization instead.
|
||||
uint continuation_offset( uint exec_off ) const;
|
||||
|
||||
uint len() const { return _len; }
|
||||
|
||||
uint get_exec_offset(uint i) { assert(i < _len, "oob"); return *adr(i); }
|
||||
uint get_cont_offset(uint i) { assert(i < _len, "oob"); return *(adr(i) + 1); }
|
||||
|
||||
int size_in_bytes() const { return len() == 0 ? 0 : ((2 * len() + 1) * sizeof(implicit_null_entry)); }
|
||||
|
||||
void copy_to(nmethod* nm);
|
||||
void copy_bytes_to(address addr, int size);
|
||||
void print(address base) const;
|
||||
void verify(nmethod *nm) const;
|
||||
};
|
||||
|
||||
@ -923,6 +923,7 @@ void nmethod::print_nmethod(bool printmethod) {
|
||||
ttyLocker ttyl; // keep the following output all in one block
|
||||
if (xtty != NULL) {
|
||||
xtty->begin_head("print_nmethod");
|
||||
log_identity(xtty);
|
||||
xtty->stamp();
|
||||
xtty->end_head();
|
||||
}
|
||||
@ -2093,34 +2094,6 @@ bool nmethod::is_patchable_at(address instr_addr) {
|
||||
}
|
||||
|
||||
|
||||
address nmethod::continuation_for_implicit_exception(address pc) {
|
||||
// Exception happened outside inline-cache check code => we are inside
|
||||
// an active nmethod => use cpc to determine a return address
|
||||
int exception_offset = pc - code_begin();
|
||||
int cont_offset = ImplicitExceptionTable(this).at( exception_offset );
|
||||
#ifdef ASSERT
|
||||
if (cont_offset == 0) {
|
||||
Thread* thread = Thread::current();
|
||||
ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
|
||||
HandleMark hm(thread);
|
||||
ResourceMark rm(thread);
|
||||
CodeBlob* cb = CodeCache::find_blob(pc);
|
||||
assert(cb != NULL && cb == this, "");
|
||||
ttyLocker ttyl;
|
||||
tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
|
||||
// Print all available nmethod info.
|
||||
print_nmethod(true);
|
||||
method()->print_codes();
|
||||
}
|
||||
#endif
|
||||
if (cont_offset == 0) {
|
||||
// Let the normal error handling report the exception
|
||||
return NULL;
|
||||
}
|
||||
return code_begin() + cont_offset;
|
||||
}
|
||||
|
||||
|
||||
void nmethod_init() {
|
||||
// make sure you didn't forget to adjust the filler fields
|
||||
assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word");
|
||||
@ -2213,6 +2186,30 @@ void nmethod::verify() {
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
#if INCLUDE_JVMCI
|
||||
{
|
||||
// Verify that implicit exceptions that deoptimize have a PcDesc and OopMap
|
||||
ImmutableOopMapSet* oms = oop_maps();
|
||||
ImplicitExceptionTable implicit_table(this);
|
||||
for (uint i = 0; i < implicit_table.len(); i++) {
|
||||
int exec_offset = (int) implicit_table.get_exec_offset(i);
|
||||
if (implicit_table.get_exec_offset(i) == implicit_table.get_cont_offset(i)) {
|
||||
assert(pc_desc_at(code_begin() + exec_offset) != NULL, "missing PcDesc");
|
||||
bool found = false;
|
||||
for (int i = 0, imax = oms->count(); i < imax; i++) {
|
||||
if (oms->pair_at(i)->pc_offset() == exec_offset) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assert(found, "missing oopmap");
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
VerifyOopsClosure voc(this);
|
||||
oops_do(&voc);
|
||||
assert(voc.ok(), "embedded oops must be OK");
|
||||
@ -3012,16 +3009,32 @@ bool nmethod::has_code_comment(address begin, address end) {
|
||||
if (str != NULL) return true;
|
||||
|
||||
// implicit exceptions?
|
||||
int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
|
||||
int cont_offset = ImplicitExceptionTable(this).continuation_offset(begin - code_begin());
|
||||
if (cont_offset != 0) return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void nmethod::print_code_comment_on(outputStream* st, int column, address begin, address end) {
|
||||
// First, find an oopmap in (begin, end].
|
||||
// We use the odd half-closed interval so that oop maps and scope descs
|
||||
// which are tied to the byte after a call are printed with the call itself.
|
||||
ImplicitExceptionTable implicit_table(this);
|
||||
int pc_offset = begin - code_begin();
|
||||
int cont_offset = implicit_table.continuation_offset(pc_offset);
|
||||
bool oop_map_required = false;
|
||||
if (cont_offset != 0) {
|
||||
st->move_to(column, 6, 0);
|
||||
if (pc_offset == cont_offset) {
|
||||
st->print("; implicit exception: deoptimizes");
|
||||
oop_map_required = true;
|
||||
} else {
|
||||
st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
|
||||
}
|
||||
}
|
||||
|
||||
// Find an oopmap in (begin, end]. We use the odd half-closed
|
||||
// interval so that oop maps and scope descs which are tied to the
|
||||
// byte after a call are printed with the call itself. OopMaps
|
||||
// associated with implicit exceptions are printed with the implicit
|
||||
// instruction.
|
||||
address base = code_begin();
|
||||
ImmutableOopMapSet* oms = oop_maps();
|
||||
if (oms != NULL) {
|
||||
@ -3029,16 +3042,25 @@ void nmethod::print_code_comment_on(outputStream* st, int column, address begin,
|
||||
const ImmutableOopMapPair* pair = oms->pair_at(i);
|
||||
const ImmutableOopMap* om = pair->get_from(oms);
|
||||
address pc = base + pair->pc_offset();
|
||||
if (pc > begin) {
|
||||
if (pc <= end) {
|
||||
if (pc >= begin) {
|
||||
#if INCLUDE_JVMCI
|
||||
bool is_implicit_deopt = implicit_table.continuation_offset(pair->pc_offset()) == (uint) pair->pc_offset();
|
||||
#else
|
||||
bool is_implicit_deopt = false;
|
||||
#endif
|
||||
if (is_implicit_deopt ? pc == begin : pc > begin && pc <= end) {
|
||||
st->move_to(column, 6, 0);
|
||||
st->print("; ");
|
||||
om->print_on(st);
|
||||
oop_map_required = false;
|
||||
}
|
||||
}
|
||||
if (pc > end) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
assert(!oop_map_required, "missed oopmap");
|
||||
|
||||
// Print any debug info present at this pc.
|
||||
ScopeDesc* sd = scope_desc_in(begin, end);
|
||||
@ -3128,12 +3150,6 @@ void nmethod::print_code_comment_on(outputStream* st, int column, address begin,
|
||||
st->move_to(column, 6, 0);
|
||||
st->print("; {%s}", str);
|
||||
}
|
||||
int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin());
|
||||
if (cont_offset != 0) {
|
||||
st->move_to(column, 6, 0);
|
||||
st->print("; implicit exception: dispatches to " INTPTR_FORMAT, p2i(code_begin() + cont_offset));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@ -424,9 +424,6 @@ public:
|
||||
long stack_traversal_mark() { return _stack_traversal_mark; }
|
||||
void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
|
||||
|
||||
// implicit exceptions support
|
||||
address continuation_for_implicit_exception(address pc);
|
||||
|
||||
// On-stack replacement support
|
||||
int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
|
||||
address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
|
||||
|
||||
@@ -38,12 +38,12 @@
bool AbstractDisassembler::_show_data_hex = true;
bool AbstractDisassembler::_show_data_int = false;
bool AbstractDisassembler::_show_data_float = false;
bool AbstractDisassembler::_align_instr = false;
bool AbstractDisassembler::_align_instr = true;
bool AbstractDisassembler::_show_pc = true;
bool AbstractDisassembler::_show_offset = false;
bool AbstractDisassembler::_show_structs = false;
bool AbstractDisassembler::_show_comment = false;
bool AbstractDisassembler::_show_block_comment = false;
bool AbstractDisassembler::_show_structs = true;
bool AbstractDisassembler::_show_comment = true;
bool AbstractDisassembler::_show_block_comment = true;

// set "true" to see what's in memory bit by bit
// might prove cumbersome on platforms where instr_len is hard to find out

@ -2640,7 +2640,7 @@ void CompileBroker::print_info(outputStream *out) {
|
||||
// That's a tradeoff which keeps together important blocks of output.
|
||||
// At the same time, continuous tty_lock hold time is kept in check,
|
||||
// preventing concurrently printing threads from stalling a long time.
|
||||
void CompileBroker::print_heapinfo(outputStream* out, const char* function, const char* granularity) {
|
||||
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
|
||||
TimeStamp ts_total;
|
||||
TimeStamp ts_global;
|
||||
TimeStamp ts;
|
||||
|
||||
@ -417,7 +417,7 @@ public:
|
||||
|
||||
// CodeHeap State Analytics.
|
||||
static void print_info(outputStream *out);
|
||||
static void print_heapinfo(outputStream *out, const char* function, const char* granularity );
|
||||
static void print_heapinfo(outputStream *out, const char* function, size_t granularity);
|
||||
};
|
||||
|
||||
#endif // SHARE_COMPILER_COMPILEBROKER_HPP
|
||||
|
||||
@ -67,7 +67,7 @@ NOT_PRODUCT(cflags(TraceOptoOutput, bool, TraceOptoOutput, TraceOptoOutput))
|
||||
cflags(CloneMapDebug, bool, false, CloneMapDebug) \
|
||||
cflags(IGVPrintLevel, intx, PrintIdealGraphLevel, IGVPrintLevel) \
|
||||
cflags(MaxNodeLimit, intx, MaxNodeLimit, MaxNodeLimit) \
|
||||
ZGC_ONLY(cflags(ZOptimizeLoadBarriers, bool, ZOptimizeLoadBarriers, ZOptimizeLoadBarriers))
|
||||
ZGC_ONLY(cflags(ZTraceLoadBarriers, bool, false, ZTraceLoadBarriers))
|
||||
#else
|
||||
#define compilerdirectives_c2_flags(cflags)
|
||||
#endif
|
||||
|
||||
@ -147,7 +147,10 @@ class decode_env {
|
||||
|
||||
if (AbstractDisassembler::show_comment()) {
|
||||
if ((_nm != NULL) && _nm->has_code_comment(pc0, pc)) {
|
||||
_nm->print_code_comment_on(st, _post_decode_alignment, pc0, pc);
|
||||
_nm->print_code_comment_on
|
||||
(st,
|
||||
_post_decode_alignment ? _post_decode_alignment : COMMENT_COLUMN,
|
||||
pc0, pc);
|
||||
// this calls reloc_string_for which calls oop::print_value_on
|
||||
}
|
||||
print_hook_comments(pc0, _nm != NULL);
|
||||
|
||||
@ -379,12 +379,8 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
||||
continue;
|
||||
}
|
||||
#ifdef ASSERT
|
||||
// We can not verify the oop here if we are using ZGC, the oop
|
||||
// will be bad in case we had a safepoint between a load and a
|
||||
// load barrier.
|
||||
if (!UseZGC &&
|
||||
((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
|
||||
!Universe::heap()->is_in_or_null(*loc))) {
|
||||
if ((((uintptr_t)loc & (sizeof(*loc)-1)) != 0) ||
|
||||
!Universe::heap()->is_in_or_null(*loc)) {
|
||||
tty->print_cr("# Found non oop pointer. Dumping state at failure");
|
||||
// try to dump out some helpful debugging information
|
||||
trace_codeblob_maps(fr, reg_map);
|
||||
|
||||
@ -259,6 +259,7 @@ public:
|
||||
Optimization,
|
||||
Expansion
|
||||
};
|
||||
|
||||
virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; }
|
||||
virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const;
|
||||
|
||||
@ -273,7 +274,6 @@ public:
|
||||
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
|
||||
virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const {}
|
||||
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const {}
|
||||
virtual void add_users_to_worklist(Unique_Node_List* worklist) const {}
|
||||
|
||||
// Allow barrier sets to have shared state that is preserved across a compilation unit.
|
||||
// This could for example comprise macro nodes to be expanded during macro expansion.
|
||||
@ -286,17 +286,21 @@ public:
|
||||
virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }
|
||||
|
||||
virtual bool has_special_unique_user(const Node* node) const { return false; }
|
||||
virtual bool needs_anti_dependence_check(const Node* node) const { return true; }
|
||||
|
||||
virtual void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const { }
|
||||
|
||||
enum CompilePhase {
|
||||
BeforeOptimize, /* post_parse = true */
|
||||
BeforeExpand, /* post_parse = false */
|
||||
BeforeOptimize,
|
||||
BeforeLateInsertion,
|
||||
BeforeMacroExpand,
|
||||
BeforeCodeGen
|
||||
};
|
||||
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const {}
|
||||
|
||||
virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const { return false; }
|
||||
#ifdef ASSERT
|
||||
virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const { return false; }
|
||||
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const {}
|
||||
#endif
|
||||
|
||||
virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { return false; }
|
||||
@ -310,8 +314,8 @@ public:
|
||||
virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; };
|
||||
virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; }
|
||||
|
||||
virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const {}
|
||||
virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const {}
|
||||
virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const { }
|
||||
virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const { }
|
||||
|
||||
virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; }
|
||||
virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; }
|
||||
|
||||
@ -33,8 +33,6 @@
|
||||
#include "utilities/defaultStream.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
size_t MinHeapSize = 0;
|
||||
|
||||
size_t HeapAlignment = 0;
|
||||
size_t SpaceAlignment = 0;
|
||||
|
||||
@ -122,7 +120,7 @@ void GCArguments::assert_size_info() {
|
||||
assert(MaxHeapSize >= MinHeapSize, "Ergonomics decided on incompatible minimum and maximum heap sizes");
|
||||
assert(InitialHeapSize >= MinHeapSize, "Ergonomics decided on incompatible initial and minimum heap sizes");
|
||||
assert(MaxHeapSize >= InitialHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes");
|
||||
assert(MaxHeapSize % HeapAlignment == 0, "MinHeapSize alignment");
|
||||
assert(MinHeapSize % HeapAlignment == 0, "MinHeapSize alignment");
|
||||
assert(InitialHeapSize % HeapAlignment == 0, "InitialHeapSize alignment");
|
||||
assert(MaxHeapSize % HeapAlignment == 0, "MaxHeapSize alignment");
|
||||
}
|
||||
@ -149,7 +147,7 @@ void GCArguments::initialize_heap_flags_and_sizes() {
|
||||
if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
|
||||
vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size");
|
||||
}
|
||||
if (MinHeapSize != 0 && MaxHeapSize < MinHeapSize) {
|
||||
if (FLAG_IS_CMDLINE(MinHeapSize) && MaxHeapSize < MinHeapSize) {
|
||||
vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
|
||||
}
|
||||
}
|
||||
@ -166,28 +164,28 @@ void GCArguments::initialize_heap_flags_and_sizes() {
|
||||
}
|
||||
|
||||
// User inputs from -Xmx and -Xms must be aligned
|
||||
MinHeapSize = align_up(MinHeapSize, HeapAlignment);
|
||||
size_t aligned_initial_heap_size = align_up(InitialHeapSize, HeapAlignment);
|
||||
size_t aligned_max_heap_size = align_up(MaxHeapSize, HeapAlignment);
|
||||
|
||||
// Write back to flags if the values changed
|
||||
if (aligned_initial_heap_size != InitialHeapSize) {
|
||||
FLAG_SET_ERGO(InitialHeapSize, aligned_initial_heap_size);
|
||||
if (!is_aligned(MinHeapSize, HeapAlignment)) {
|
||||
FLAG_SET_ERGO(MinHeapSize, align_up(MinHeapSize, HeapAlignment));
|
||||
}
|
||||
if (aligned_max_heap_size != MaxHeapSize) {
|
||||
FLAG_SET_ERGO(MaxHeapSize, aligned_max_heap_size);
|
||||
if (!is_aligned(InitialHeapSize, HeapAlignment)) {
|
||||
FLAG_SET_ERGO(InitialHeapSize, align_up(InitialHeapSize, HeapAlignment));
|
||||
}
|
||||
if (!is_aligned(MaxHeapSize, HeapAlignment)) {
|
||||
FLAG_SET_ERGO(MaxHeapSize, align_up(MaxHeapSize, HeapAlignment));
|
||||
}
|
||||
|
||||
if (FLAG_IS_CMDLINE(InitialHeapSize) && MinHeapSize != 0 &&
|
||||
if (FLAG_IS_CMDLINE(InitialHeapSize) && FLAG_IS_CMDLINE(MinHeapSize) &&
|
||||
InitialHeapSize < MinHeapSize) {
|
||||
vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
|
||||
}
|
||||
|
||||
if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
|
||||
FLAG_SET_ERGO(MaxHeapSize, InitialHeapSize);
|
||||
} else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) {
|
||||
FLAG_SET_ERGO(InitialHeapSize, MaxHeapSize);
|
||||
if (InitialHeapSize < MinHeapSize) {
|
||||
MinHeapSize = InitialHeapSize;
|
||||
FLAG_SET_ERGO(MinHeapSize, InitialHeapSize);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -30,8 +30,6 @@
|
||||
|
||||
class CollectedHeap;
|
||||
|
||||
extern size_t MinHeapSize;
|
||||
|
||||
extern size_t HeapAlignment;
|
||||
extern size_t SpaceAlignment;
|
||||
|
||||
|
||||
@ -713,6 +713,10 @@
|
||||
"to move") \
|
||||
\
|
||||
/* gc parameters */ \
|
||||
product(size_t, MinHeapSize, 0, \
|
||||
"Minimum heap size (in bytes); zero means use ergonomics") \
|
||||
constraint(MinHeapSizeConstraintFunc,AfterErgo) \
|
||||
\
|
||||
product(size_t, InitialHeapSize, 0, \
|
||||
"Initial heap size (in bytes); zero means use ergonomics") \
|
||||
constraint(InitialHeapSizeConstraintFunc,AfterErgo) \
|
||||
|
||||
@ -90,7 +90,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
|
||||
}
|
||||
// If needed, synchronize MinHeapSize size and InitialHeapSize
|
||||
if (MinHeapSize < smallest_heap_size) {
|
||||
MinHeapSize = smallest_heap_size;
|
||||
FLAG_SET_ERGO(MinHeapSize, smallest_heap_size);
|
||||
if (InitialHeapSize < MinHeapSize) {
|
||||
FLAG_SET_ERGO(InitialHeapSize, smallest_heap_size);
|
||||
}
|
||||
|
||||
@ -319,6 +319,10 @@ static JVMFlag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bo
|
||||
return MaxSizeForAlignment(name, value, heap_alignment, verbose);
|
||||
}
|
||||
|
||||
JVMFlag::Error MinHeapSizeConstraintFunc(size_t value, bool verbose) {
|
||||
return MaxSizeForHeapAlignment("MinHeapSize", value, verbose);
|
||||
}
|
||||
|
||||
JVMFlag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose) {
|
||||
return MaxSizeForHeapAlignment("InitialHeapSize", value, verbose);
|
||||
}
|
||||
|
||||
@ -59,6 +59,7 @@ JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose);
|
||||
JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose);
|
||||
JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose);
|
||||
JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose);
|
||||
JVMFlag::Error MinHeapSizeConstraintFunc(size_t value, bool verbose);
|
||||
JVMFlag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
|
||||
JVMFlag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
|
||||
JVMFlag::Error SoftMaxHeapSizeConstraintFunc(size_t value, bool verbose);
|
||||
|
||||
@ -912,8 +912,6 @@ void ShenandoahBarrierSetC2::eliminate_useless_gc_barriers(Unique_Node_List &use
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {}
|
||||
|
||||
void* ShenandoahBarrierSetC2::create_barrier_state(Arena* comp_arena) const {
|
||||
return new(comp_arena) ShenandoahBarrierSetC2State(comp_arena);
|
||||
}
|
||||
@ -928,7 +926,7 @@ bool ShenandoahBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
|
||||
|
||||
#ifdef ASSERT
|
||||
void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
|
||||
if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) {
|
||||
if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeMacroExpand) {
|
||||
ShenandoahBarrierC2Support::verify(Compile::current()->root());
|
||||
} else if (phase == BarrierSetC2::BeforeCodeGen) {
|
||||
// Verify G1 pre-barriers
|
||||
|
||||
@ -126,7 +126,6 @@ public:
|
||||
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
|
||||
virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const;
|
||||
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const;
|
||||
virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
|
||||
|
||||
// Allow barrier sets to have shared state that is preserved across a compilation unit.
|
||||
// This could for example comprise macro nodes to be expanded during macro expansion.
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
|
||||
* Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
@ -122,7 +122,7 @@ public:
|
||||
void possibly_parallel_blobs_do(CodeBlobClosure* f);
|
||||
};
|
||||
|
||||
class ShenandoahCodeRoots : public CHeapObj<mtGC> {
|
||||
class ShenandoahCodeRoots : public AllStatic {
|
||||
friend class ShenandoahHeap;
|
||||
friend class ShenandoahCodeRootsIterator;
|
||||
|
||||
|
||||
@ -60,8 +60,7 @@ ShenandoahSerialRoots::ShenandoahSerialRoots() :
|
||||
_object_synchronizer_root(&ObjectSynchronizer::oops_do, ShenandoahPhaseTimings::ObjectSynchronizerRoots),
|
||||
_management_root(&Management::oops_do, ShenandoahPhaseTimings::ManagementRoots),
|
||||
_system_dictionary_root(&SystemDictionary::oops_do, ShenandoahPhaseTimings::SystemDictionaryRoots),
|
||||
_jvmti_root(&JvmtiExport::oops_do, ShenandoahPhaseTimings::JVMTIRoots),
|
||||
_jni_handle_root(&JNIHandles::oops_do, ShenandoahPhaseTimings::JNIRoots) {
|
||||
_jvmti_root(&JvmtiExport::oops_do, ShenandoahPhaseTimings::JVMTIRoots) {
|
||||
}
|
||||
|
||||
void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) {
|
||||
@ -70,7 +69,10 @@ void ShenandoahSerialRoots::oops_do(OopClosure* cl, uint worker_id) {
|
||||
_management_root.oops_do(cl, worker_id);
|
||||
_system_dictionary_root.oops_do(cl, worker_id);
|
||||
_jvmti_root.oops_do(cl, worker_id);
|
||||
_jni_handle_root.oops_do(cl, worker_id);
|
||||
}
|
||||
|
||||
ShenandoahJNIHandleRoots::ShenandoahJNIHandleRoots() :
|
||||
ShenandoahSerialRoot(&JNIHandles::oops_do, ShenandoahPhaseTimings::JNIRoots) {
|
||||
}
|
||||
|
||||
ShenandoahThreadRoots::ShenandoahThreadRoots(bool is_par) : _is_par(is_par) {
|
||||
@ -160,6 +162,7 @@ void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
|
||||
AlwaysTrueClosure always_true;
|
||||
|
||||
_serial_roots.oops_do(oops, worker_id);
|
||||
_jni_roots.oops_do(oops, worker_id);
|
||||
|
||||
_thread_roots.oops_do(oops, NULL, worker_id);
|
||||
_cld_roots.clds_do(&clds, &clds, worker_id);
|
||||
@ -189,6 +192,7 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
|
||||
AlwaysTrueClosure always_true;
|
||||
|
||||
_serial_roots.oops_do(oops, worker_id);
|
||||
_jni_roots.oops_do(oops, worker_id);
|
||||
|
||||
_thread_roots.oops_do(oops, NULL, worker_id);
|
||||
_cld_roots.clds_do(&adjust_cld_closure, NULL, worker_id);
|
||||
|
||||
@ -56,12 +56,16 @@ private:
|
||||
ShenandoahSerialRoot _management_root;
|
||||
ShenandoahSerialRoot _system_dictionary_root;
|
||||
ShenandoahSerialRoot _jvmti_root;
|
||||
ShenandoahSerialRoot _jni_handle_root;
|
||||
public:
|
||||
ShenandoahSerialRoots();
|
||||
void oops_do(OopClosure* cl, uint worker_id);
|
||||
};
|
||||
|
||||
class ShenandoahJNIHandleRoots : public ShenandoahSerialRoot {
|
||||
public:
|
||||
ShenandoahJNIHandleRoots();
|
||||
};
|
||||
|
||||
class ShenandoahThreadRoots {
|
||||
private:
|
||||
const bool _is_par;
|
||||
@ -126,6 +130,7 @@ template <typename ITR>
|
||||
class ShenandoahRootScanner : public ShenandoahRootProcessor {
|
||||
private:
|
||||
ShenandoahSerialRoots _serial_roots;
|
||||
ShenandoahJNIHandleRoots _jni_roots;
|
||||
ShenandoahClassLoaderDataRoots _cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahCodeCacheRoots<ITR> _code_roots;
|
||||
@ -152,6 +157,7 @@ typedef ShenandoahRootScanner<ShenandoahCsetCodeRootsIterator> ShenandoahCSetRoo
|
||||
class ShenandoahRootEvacuator : public ShenandoahRootProcessor {
|
||||
private:
|
||||
ShenandoahSerialRoots _serial_roots;
|
||||
ShenandoahJNIHandleRoots _jni_roots;
|
||||
ShenandoahClassLoaderDataRoots _cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahWeakRoots _weak_roots;
|
||||
@ -168,6 +174,7 @@ public:
|
||||
class ShenandoahRootUpdater : public ShenandoahRootProcessor {
|
||||
private:
|
||||
ShenandoahSerialRoots _serial_roots;
|
||||
ShenandoahJNIHandleRoots _jni_roots;
|
||||
ShenandoahClassLoaderDataRoots _cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahWeakRoots _weak_roots;
|
||||
@ -186,6 +193,7 @@ public:
|
||||
class ShenandoahRootAdjuster : public ShenandoahRootProcessor {
|
||||
private:
|
||||
ShenandoahSerialRoots _serial_roots;
|
||||
ShenandoahJNIHandleRoots _jni_roots;
|
||||
ShenandoahClassLoaderDataRoots _cld_roots;
|
||||
ShenandoahThreadRoots _thread_roots;
|
||||
ShenandoahWeakRoots _weak_roots;
|
||||
|
||||
@ -99,6 +99,7 @@ void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDC
|
||||
ResourceMark rm;
|
||||
|
||||
_serial_roots.oops_do(oops, worker_id);
|
||||
_jni_roots.oops_do(oops, worker_id);
|
||||
_cld_roots.clds_do(clds, clds, worker_id);
|
||||
_thread_roots.threads_do(&tc_cl, worker_id);
|
||||
|
||||
@ -118,6 +119,7 @@ void ShenandoahRootScanner<ITR>::roots_do_unchecked(OopClosure* oops) {
|
||||
ResourceMark rm;
|
||||
|
||||
_serial_roots.oops_do(oops, 0);
|
||||
_jni_roots.oops_do(oops, 0);
|
||||
_cld_roots.clds_do(&clds, &clds, 0);
|
||||
_thread_roots.threads_do(&tc_cl, 0);
|
||||
_code_roots.code_blobs_do(&code, 0);
|
||||
@ -130,6 +132,7 @@ void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oop
|
||||
ResourceMark rm;
|
||||
|
||||
_serial_roots.oops_do(oops, worker_id);
|
||||
_jni_roots.oops_do(oops, worker_id);
|
||||
_cld_roots.clds_do(clds, NULL, worker_id);
|
||||
_thread_roots.threads_do(&tc_cl, worker_id);
|
||||
}
|
||||
@ -141,6 +144,7 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv
|
||||
CLDToOopClosure* weak_clds = ShenandoahHeap::heap()->unload_classes() ? NULL : &clds;
|
||||
|
||||
_serial_roots.oops_do(keep_alive, worker_id);
|
||||
_jni_roots.oops_do(keep_alive, worker_id);
|
||||
|
||||
_thread_roots.oops_do(keep_alive, NULL, worker_id);
|
||||
_cld_roots.clds_do(&clds, weak_clds, worker_id);
|
||||
|
||||
@ -69,15 +69,18 @@ void ShenandoahRootVerifier::oops_do(OopClosure* oops) {
|
||||
|
||||
if (verify(SerialRoots)) {
|
||||
shenandoah_assert_safepoint();
|
||||
|
||||
Universe::oops_do(oops);
|
||||
Management::oops_do(oops);
|
||||
JvmtiExport::oops_do(oops);
|
||||
JNIHandles::oops_do(oops);
|
||||
ObjectSynchronizer::oops_do(oops);
|
||||
SystemDictionary::oops_do(oops);
|
||||
}
|
||||
|
||||
if (verify(JNIHandleRoots)) {
|
||||
shenandoah_assert_safepoint();
|
||||
JNIHandles::oops_do(oops);
|
||||
}
|
||||
|
||||
if (verify(WeakRoots)) {
|
||||
shenandoah_assert_safepoint();
|
||||
AlwaysTrueClosure always_true;
|
||||
|
||||
@@ -36,7 +36,8 @@ public:
CLDGRoots = 1 << 3,
WeakRoots = 1 << 4,
StringDedupRoots = 1 << 5,
AllRoots = (SerialRoots | ThreadRoots | CodeRoots | CLDGRoots | WeakRoots | StringDedupRoots)
JNIHandleRoots = 1 << 6,
AllRoots = (SerialRoots | ThreadRoots | CodeRoots | CLDGRoots | WeakRoots | StringDedupRoots | JNIHandleRoots)
};

private:

File diff suppressed because it is too large
@ -29,15 +29,33 @@
|
||||
#include "opto/node.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
class ZCompareAndSwapPNode : public CompareAndSwapPNode {
|
||||
public:
|
||||
ZCompareAndSwapPNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
|
||||
virtual int Opcode() const;
|
||||
};
|
||||
|
||||
class ZWeakCompareAndSwapPNode : public WeakCompareAndSwapPNode {
|
||||
public:
|
||||
ZWeakCompareAndSwapPNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, MemNode::MemOrd mem_ord) : WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
|
||||
virtual int Opcode() const;
|
||||
};
|
||||
|
||||
class ZCompareAndExchangePNode : public CompareAndExchangePNode {
|
||||
public:
|
||||
ZCompareAndExchangePNode(Node* c, Node *mem, Node *adr, Node *val, Node *ex, const TypePtr* at, const Type* t, MemNode::MemOrd mem_ord) : CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { }
|
||||
virtual int Opcode() const;
|
||||
};
|
||||
|
||||
class ZGetAndSetPNode : public GetAndSetPNode {
|
||||
public:
|
||||
ZGetAndSetPNode(Node* c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t) : GetAndSetPNode(c, mem, adr, val, at, t) { }
|
||||
virtual int Opcode() const;
|
||||
};
|
||||
|
||||
class LoadBarrierNode : public MultiNode {
|
||||
private:
|
||||
bool _weak; // On strong or weak oop reference
|
||||
bool _writeback; // Controls if the barrier writes the healed oop back to memory
|
||||
// A swap on a memory location must never write back the healed oop
|
||||
bool _oop_reload_allowed; // Controls if the barrier are allowed to reload the oop from memory
|
||||
// before healing, otherwise both the oop and the address must be
|
||||
// passed to the barrier from the oop
|
||||
|
||||
static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
|
||||
void push_dominated_barriers(PhaseIterGVN* igvn) const;
|
||||
|
||||
@ -57,9 +75,7 @@ public:
|
||||
Node* mem,
|
||||
Node* val,
|
||||
Node* adr,
|
||||
bool weak,
|
||||
bool writeback,
|
||||
bool oop_reload_allowed);
|
||||
bool weak);
|
||||
|
||||
virtual int Opcode() const;
|
||||
virtual uint size_of() const;
|
||||
@ -86,17 +102,11 @@ public:
|
||||
bool is_weak() const {
|
||||
return _weak;
|
||||
}
|
||||
|
||||
bool is_writeback() const {
|
||||
return _writeback;
|
||||
}
|
||||
|
||||
bool oop_reload_allowed() const {
|
||||
return _oop_reload_allowed;
|
||||
}
|
||||
};
|
||||
|
||||
class LoadBarrierSlowRegNode : public LoadPNode {
|
||||
private:
|
||||
bool _is_weak;
|
||||
public:
|
||||
LoadBarrierSlowRegNode(Node *c,
|
||||
Node *mem,
|
||||
@ -104,8 +114,9 @@ public:
|
||||
const TypePtr *at,
|
||||
const TypePtr* t,
|
||||
MemOrd mo,
|
||||
bool weak = false,
|
||||
ControlDependency control_dependency = DependsOnlyOnTest) :
|
||||
LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
|
||||
LoadPNode(c, mem, adr, at, t, mo, control_dependency), _is_weak(weak) {
|
||||
init_class_id(Class_LoadBarrierSlowReg);
|
||||
}
|
||||
|
||||
@ -118,30 +129,8 @@ public:
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
};
|
||||
|
||||
class LoadBarrierWeakSlowRegNode : public LoadPNode {
|
||||
public:
|
||||
LoadBarrierWeakSlowRegNode(Node *c,
|
||||
Node *mem,
|
||||
Node *adr,
|
||||
const TypePtr *at,
|
||||
const TypePtr* t,
|
||||
MemOrd mo,
|
||||
ControlDependency control_dependency = DependsOnlyOnTest) :
|
||||
LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
|
||||
init_class_id(Class_LoadBarrierWeakSlowReg);
|
||||
}
|
||||
|
||||
virtual const char * name() {
|
||||
return "LoadBarrierWeakSlowRegNode";
|
||||
}
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
bool is_weak() { return _is_weak; }
|
||||
};
|
||||
|
||||
class ZBarrierSetC2State : public ResourceObj {
|
||||
@ -157,15 +146,17 @@ public:
|
||||
LoadBarrierNode* load_barrier_node(int idx) const;
|
||||
};
|
||||
|
||||
enum BarrierInfo {
|
||||
NoBarrier = 0,
|
||||
RequireBarrier = 1,
|
||||
WeakBarrier = 3, // Inclusive with RequireBarrier
|
||||
ExpandedBarrier = 4
|
||||
};
|
||||
|
||||
class ZBarrierSetC2 : public BarrierSetC2 {
|
||||
private:
|
||||
ZBarrierSetC2State* state() const;
|
||||
Node* make_cas_loadbarrier(C2AtomicParseAccess& access) const;
|
||||
Node* make_cmpx_loadbarrier(C2AtomicParseAccess& access) const;
|
||||
void expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
|
||||
void expand_loadbarrier_node(PhaseMacroExpand* phase, LoadBarrierNode* barrier) const;
|
||||
void expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const;
|
||||
const TypeFunc* load_barrier_Type() const;
|
||||
|
||||
#ifdef ASSERT
|
||||
void verify_gc_barriers(bool post_parse) const;
|
||||
@ -186,41 +177,42 @@ protected:
|
||||
const Type* val_type) const;
|
||||
|
||||
public:
|
||||
Node* load_barrier(GraphKit* kit,
|
||||
Node* val,
|
||||
Node* adr,
|
||||
bool weak = false,
|
||||
bool writeback = true,
|
||||
bool oop_reload_allowed = true) const;
|
||||
|
||||
virtual void* create_barrier_state(Arena* comp_arena) const;
|
||||
|
||||
virtual bool has_load_barriers() const { return true; }
|
||||
virtual bool is_gc_barrier_node(Node* node) const;
|
||||
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
|
||||
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const;
|
||||
virtual void add_users_to_worklist(Unique_Node_List* worklist) const;
|
||||
virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const;
|
||||
virtual Node* step_over_gc_barrier(Node* c) const;
|
||||
|
||||
virtual void register_potential_barrier_node(Node* node) const;
|
||||
virtual void unregister_potential_barrier_node(Node* node) const;
|
||||
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
|
||||
virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const;
|
||||
virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const;
|
||||
|
||||
virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const;
|
||||
virtual Node* step_over_gc_barrier(Node* c) const;
|
||||
// If the BarrierSetC2 state has kept barrier nodes in its compilation unit state to be
|
||||
// expanded later, then now is the time to do so.
|
||||
|
||||
virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const;
|
||||
|
||||
static void find_dominating_barriers(PhaseIterGVN& igvn);
|
||||
static void loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round);
|
||||
|
||||
virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const;
|
||||
|
||||
virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const;
|
||||
virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const;
|
||||
virtual bool needs_anti_dependence_check(const Node* node) const;
|
||||
|
||||
#ifdef ASSERT
|
||||
virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const;
|
||||
#endif
|
||||
|
||||
virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
|
||||
virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const;
|
||||
// Load barrier insertion and expansion external
|
||||
virtual void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const;
|
||||
virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const;
|
||||
virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return (mode == LoopOptsZBarrierInsertion); }
|
||||
|
||||
private:
|
||||
// Load barrier insertion and expansion internal
|
||||
void insert_barriers_on_unsafe(PhaseIdealLoop* phase) const;
|
||||
void clean_catch_blocks(PhaseIdealLoop* phase) const;
|
||||
void insert_load_barriers(PhaseIdealLoop* phase) const;
|
||||
LoadNode* insert_one_loadbarrier(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl) const;
|
||||
void insert_one_loadbarrier_inner(PhaseIdealLoop* phase, LoadNode* load, Node* ctrl, VectorSet visited) const;
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP
|
||||
|
||||
@ -328,46 +328,9 @@ void ZHeap::mark_flush_and_free(Thread* thread) {
|
||||
_mark.flush_and_free(thread);
|
||||
}
|
||||
|
||||
class ZFixupPartialLoadsClosure : public ZRootsIteratorClosure {
|
||||
public:
|
||||
virtual void do_oop(oop* p) {
|
||||
ZBarrier::mark_barrier_on_root_oop_field(p);
|
||||
}
|
||||
|
||||
virtual void do_oop(narrowOop* p) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
};
|
||||
|
||||
class ZFixupPartialLoadsTask : public ZTask {
|
||||
private:
|
||||
ZThreadRootsIterator _thread_roots;
|
||||
|
||||
public:
|
||||
ZFixupPartialLoadsTask() :
|
||||
ZTask("ZFixupPartialLoadsTask"),
|
||||
_thread_roots() {}
|
||||
|
||||
virtual void work() {
|
||||
ZFixupPartialLoadsClosure cl;
|
||||
_thread_roots.oops_do(&cl);
|
||||
}
|
||||
};
|
||||
|
||||
void ZHeap::fixup_partial_loads() {
|
||||
ZFixupPartialLoadsTask task;
|
||||
_workers.run_parallel(&task);
|
||||
}
|
||||
|
||||
bool ZHeap::mark_end() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
|
||||
|
||||
// C2 can generate code where a safepoint poll is inserted
|
||||
// between a load and the associated load barrier. To handle
|
||||
// this case we need to rescan the thread stack here to make
|
||||
// sure such oops are marked.
|
||||
fixup_partial_loads();
|
||||
|
||||
// Try end marking
|
||||
if (!_mark.end()) {
|
||||
// Marking not completed, continue concurrent mark
|
||||
@ -508,8 +471,8 @@ void ZHeap::relocate() {
|
||||
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
|
||||
|
||||
ZHeapIterator iter(visit_referents);
|
||||
iter.objects_do(cl);
|
||||
ZHeapIterator iter;
|
||||
iter.objects_do(cl, visit_referents);
|
||||
}
|
||||
|
||||
void ZHeap::serviceability_initialize() {
|
||||
|
||||
@ -51,18 +51,29 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
template <bool Concurrent, bool Weak>
|
||||
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
|
||||
private:
|
||||
ZHeapIterator* const _iter;
|
||||
|
||||
oop load_oop(oop* p) {
|
||||
if (Weak) {
|
||||
return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
}

if (Concurrent) {
return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
}

return RawAccess<>::oop_load(p);
}

public:
ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
_iter(iter) {}

virtual void do_oop(oop* p) {
// Load barrier needed here, even on non-concurrent strong roots,
// for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
const oop obj = load_oop(p);
_iter->push(obj);
}

@ -71,28 +82,27 @@ public:
}
};

template <bool VisitReferents>
class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
private:
ZHeapIterator* const _iter;
const oop _base;
const bool _visit_referents;

oop load_oop(oop* p) const {
if (_visit_referents) {
return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
} else {
return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
oop load_oop(oop* p) {
if (VisitReferents) {
return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
}

return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
}

public:
ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
_iter(iter),
_base(base),
_visit_referents(visit_referents) {}
_base(base) {}

virtual ReferenceIterationMode reference_iteration_mode() {
return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
}

virtual void do_oop(oop* p) {
@ -111,10 +121,9 @@ public:
#endif
};

ZHeapIterator::ZHeapIterator(bool visit_referents) :
ZHeapIterator::ZHeapIterator() :
_visit_stack(),
_visit_map(),
_visit_referents(visit_referents) {}
_visit_map() {}

ZHeapIterator::~ZHeapIterator() {
ZVisitMapIterator iter(&_visit_map);
@ -162,49 +171,45 @@ void ZHeapIterator::push(oop obj) {
_visit_stack.push(obj);
}

template <typename RootsIterator, bool Concurrent, bool Weak>
void ZHeapIterator::push_roots() {
ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
RootsIterator roots;
roots.oops_do(&cl);
}

template <bool VisitReferents>
void ZHeapIterator::push_fields(oop obj) {
ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
obj->oop_iterate(&cl);
}

template <bool VisitReferents>
void ZHeapIterator::objects_do(ObjectClosure* cl) {
// Note that the heap iterator visits all reachable objects, including
// objects that might be unreachable from the application, such as a
// not yet cleared JNIWeakGloablRef. However, also note that visiting
// the JVMTI tag map is a requirement to make sure we visit all tagged
// objects, even those that might now have become phantom reachable.
// If we didn't do this the application would have expected to see
// ObjectFree events for phantom reachable objects in the tag map.

ZStatTimerDisable disable;
ZHeapIteratorRootOopClosure root_cl(this);

// Push strong roots onto stack
{
ZRootsIterator roots;
roots.oops_do(&root_cl);
}

{
ZConcurrentRootsIterator roots;
roots.oops_do(&root_cl);
}

// Push weak roots onto stack
{
ZWeakRootsIterator roots;
roots.oops_do(&root_cl);
}

{
ZConcurrentWeakRootsIterator roots;
roots.oops_do(&root_cl);
}
// Push roots to visit
push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
push_roots<ZConcurrentRootsIterator, true /* Concurrent */, false /* Weak */>();
push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();

// Drain stack
while (!_visit_stack.is_empty()) {
const oop obj = _visit_stack.pop();

// Visit
// Visit object
cl->do_object(obj);

// Push members to visit
ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
obj->oop_iterate(&push_cl);
// Push fields to visit
push_fields<VisitReferents>(obj);
}
}

void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents) {
if (visit_referents) {
objects_do<true /* VisitReferents */>(cl);
} else {
objects_do<false /* VisitReferents */>(cl);
}
}
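The hunk above folds the runtime _visit_referents flag into template parameters (Concurrent, Weak, VisitReferents), so the flag becomes a compile-time constant inside each instantiation and the public objects_do(cl, visit_referents) wrapper only picks the instantiation once. As a rough standalone illustration of that dispatch pattern only (not JDK code; all names below are invented):

#include <iostream>

// A non-template entry point forwards a runtime bool into a function templated
// on that bool, so the flag is a compile-time constant in the hot loop.
template <bool Verbose>
static void run_impl(int count) {
  for (int i = 0; i < count; i++) {
    if (Verbose) {                 // resolved at compile time; dead branch drops out
      std::cout << "step " << i << '\n';
    }
  }
}

static void run(int count, bool verbose) {
  if (verbose) {
    run_impl<true>(count);
  } else {
    run_impl<false>(count);
  }
}

int main() {
  run(3, true);
  run(3, false);
  return 0;
}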
@ -32,8 +32,8 @@ class ObjectClosure;
class ZHeapIteratorBitMap;

class ZHeapIterator : public StackObj {
friend class ZHeapIteratorRootOopClosure;
friend class ZHeapIteratorOopClosure;
template<bool Concurrent, bool Weak> friend class ZHeapIteratorRootOopClosure;
template<bool VisitReferents> friend class ZHeapIteratorOopClosure;

private:
typedef ZGranuleMap<ZHeapIteratorBitMap*> ZVisitMap;
@ -42,16 +42,19 @@ private:

ZVisitStack _visit_stack;
ZVisitMap _visit_map;
const bool _visit_referents;

ZHeapIteratorBitMap* object_map(oop obj);
void push(oop obj);

template <typename RootsIterator, bool Concurrent, bool Weak> void push_roots();
template <bool VisitReferents> void push_fields(oop obj);
template <bool VisitReferents> void objects_do(ObjectClosure* cl);

public:
ZHeapIterator(bool visit_referents);
ZHeapIterator();
~ZHeapIterator();

void objects_do(ObjectClosure* cl);
void objects_do(ObjectClosure* cl, bool visit_referents);
};

#endif // SHARE_GC_Z_ZHEAPITERATOR_HPP
@ -82,9 +82,6 @@
diagnostic(bool, ZVerifyForwarding, false, \
"Verify forwarding tables") \
\
diagnostic(bool, ZOptimizeLoadBarriers, true, \
"Apply load barrier optimizations") \
\
develop(bool, ZVerifyLoadBarriers, false, \
"Verify that reference loads are followed by barriers")
@ -438,7 +438,13 @@ void JfrStartFlightRecordingDCmd::execute(DCmdSource source, TRAPS) {

jobjectArray settings = NULL;
if (_settings.is_set()) {
const int length = _settings.value()->array()->length();
int length = _settings.value()->array()->length();
if (length == 1) {
const char* c_str = _settings.value()->array()->at(0);
if (strcmp(c_str, "none") == 0) {
length = 0;
}
}
settings = JfrJavaSupport::new_string_array(length, CHECK);
assert(settings != NULL, "invariant");
for (int i = 0; i < length; ++i) {
@ -184,7 +184,7 @@ void JfrJavaArguments::Parameters::copy(JavaCallArguments& args, TRAPS) const {
}
}

JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(0) {
JfrJavaArguments::JfrJavaArguments(JavaValue* result) : _result(result), _klass(NULL), _name(NULL), _signature(NULL), _array_length(-1) {
assert(result != NULL, "invariant");
}

@ -193,7 +193,7 @@ JfrJavaArguments::JfrJavaArguments(JavaValue* result, const char* klass_name, co
_klass(NULL),
_name(NULL),
_signature(NULL),
_array_length(0) {
_array_length(-1) {
assert(result != NULL, "invariant");
if (klass_name != NULL) {
set_klass(klass_name, CHECK);
@ -210,7 +210,7 @@ JfrJavaArguments::JfrJavaArguments(JavaValue* result, const Klass* klass, const
_klass(NULL),
_name(NULL),
_signature(NULL),
_array_length(0) {
_array_length(-1) {
assert(result != NULL, "invariant");
if (klass != NULL) {
set_klass(klass);

@ -168,7 +168,7 @@ static void create_object(JfrJavaArguments* args, JavaValue* result, TRAPS) {

const int array_length = args->array_length();

if (array_length > 0) {
if (array_length >= 0) {
array_construction(args, result, klass, array_length, CHECK);
} else {
object_construction(args, result, klass, THREAD);
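In the hunk above the default _array_length moves from 0 to -1 and create_object now tests array_length >= 0, so a requested zero-length array is no longer confused with "no array at all". A minimal standalone sketch of that sentinel convention (illustrative names only, not JDK code):

#include <cassert>

// -1 means "plain object requested"; any value >= 0, including 0, means
// "construct an array of that length". A 0 sentinel could not express both.
struct Args {
  int array_length = -1;
  bool is_array() const { return array_length >= 0; }
};

int main() {
  Args plain;                              // default: not an array
  Args empty_array;
  empty_array.array_length = 0;            // an empty array is now representable
  assert(!plain.is_array());
  assert(empty_array.is_array());
  return 0;
}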
@ -562,6 +562,7 @@ JVMCI::CodeInstallResult CodeInstaller::gather_metadata(JVMCIObject target, JVMC
|
||||
metadata.set_pc_desc(_debug_recorder->pcs(), _debug_recorder->pcs_length());
|
||||
metadata.set_scopes(_debug_recorder->stream()->buffer(), _debug_recorder->data_size());
|
||||
metadata.set_exception_table(&_exception_handler_table);
|
||||
metadata.set_implicit_exception_table(&_implicit_exception_table);
|
||||
|
||||
RelocBuffer* reloc_buffer = metadata.get_reloc_buffer();
|
||||
|
||||
@ -637,7 +638,7 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler,
|
||||
JVMCIObject mirror = installed_code;
|
||||
nmethod* nm = NULL;
|
||||
result = runtime()->register_method(jvmci_env(), method, nm, entry_bci, &_offsets, _orig_pc_offset, &buffer,
|
||||
stack_slots, _debug_recorder->_oopmaps, &_exception_handler_table,
|
||||
stack_slots, _debug_recorder->_oopmaps, &_exception_handler_table, &_implicit_exception_table,
|
||||
compiler, _debug_recorder, _dependencies, id,
|
||||
has_unsafe_access, _has_wide_vector, compiled_code, mirror,
|
||||
failed_speculations, speculations, speculations_len);
|
||||
@ -870,6 +871,10 @@ JVMCI::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer, bo
|
||||
if (_orig_pc_offset < 0) {
|
||||
JVMCI_ERROR_OK("method contains safepoint, but has no deopt rescue slot");
|
||||
}
|
||||
if (JVMCIENV->equals(reason, jvmci_env()->get_site_InfopointReason_IMPLICIT_EXCEPTION())) {
|
||||
TRACE_jvmci_4("implicit exception at %i", pc_offset);
|
||||
_implicit_exception_table.add_deoptimize(pc_offset);
|
||||
}
|
||||
} else {
|
||||
TRACE_jvmci_4("infopoint at %i", pc_offset);
|
||||
site_Infopoint(buffer, pc_offset, site, JVMCI_CHECK_OK);
|
||||
|
||||
@ -85,6 +85,8 @@ public:
|
||||
|
||||
ExceptionHandlerTable* get_exception_table() { return _exception_table; }
|
||||
|
||||
ImplicitExceptionTable* get_implicit_exception_table() { return _implicit_exception_table; }
|
||||
|
||||
void set_pc_desc(PcDesc* desc, int count) {
|
||||
_pc_desc = desc;
|
||||
_nr_pc_desc = count;
|
||||
@ -105,6 +107,10 @@ public:
|
||||
_exception_table = table;
|
||||
}
|
||||
|
||||
void set_implicit_exception_table(ImplicitExceptionTable* table) {
|
||||
_implicit_exception_table = table;
|
||||
}
|
||||
|
||||
private:
|
||||
CodeBlob* _cb;
|
||||
PcDesc* _pc_desc;
|
||||
@ -118,6 +124,7 @@ private:
|
||||
AOTOopRecorder* _oop_recorder;
|
||||
#endif
|
||||
ExceptionHandlerTable* _exception_table;
|
||||
ImplicitExceptionTable* _implicit_exception_table;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -185,6 +192,7 @@ private:
|
||||
DebugInformationRecorder* _debug_recorder;
|
||||
Dependencies* _dependencies;
|
||||
ExceptionHandlerTable _exception_handler_table;
|
||||
ImplicitExceptionTable _implicit_exception_table;
|
||||
|
||||
bool _immutable_pic_compilation; // Installer is called for Immutable PIC compilation.
|
||||
|
||||
|
||||
@ -453,6 +453,9 @@ C2V_VMENTRY_NULL(jobject, findUniqueConcreteMethod, (JNIEnv* env, jobject, jobje
|
||||
if (holder->is_interface()) {
|
||||
JVMCI_THROW_MSG_NULL(InternalError, err_msg("Interface %s should be handled in Java code", holder->external_name()));
|
||||
}
|
||||
if (method->can_be_statically_bound()) {
|
||||
JVMCI_THROW_MSG_NULL(InternalError, err_msg("Effectively static method %s.%s should be handled in Java code", method->method_holder()->external_name(), method->external_name()));
|
||||
}
|
||||
|
||||
methodHandle ucm;
|
||||
{
|
||||
@ -921,6 +924,14 @@ C2V_VMENTRY_0(jint, getMetadata, (JNIEnv *env, jobject, jobject target, jobject
|
||||
}
|
||||
HotSpotJVMCI::HotSpotMetaData::set_exceptionBytes(JVMCIENV, metadata_handle, exceptionArray);
|
||||
|
||||
ImplicitExceptionTable* implicit = code_metadata.get_implicit_exception_table();
|
||||
int implicit_table_size = implicit->size_in_bytes();
|
||||
JVMCIPrimitiveArray implicitExceptionArray = JVMCIENV->new_byteArray(implicit_table_size, JVMCI_CHECK_(JVMCI::cache_full));
|
||||
if (implicit_table_size > 0) {
|
||||
implicit->copy_bytes_to((address) HotSpotJVMCI::resolve(implicitExceptionArray)->byte_at_addr(0), implicit_table_size);
|
||||
}
|
||||
HotSpotJVMCI::HotSpotMetaData::set_implicitExceptionBytes(JVMCIENV, metadata_handle, implicitExceptionArray);
|
||||
|
||||
return result;
|
||||
#else
|
||||
JVMCI_THROW_MSG_0(InternalError, "unimplemented");
|
||||
|
||||
@ -334,6 +334,7 @@
|
||||
primarray_field(HotSpotMetaData, scopesDescBytes, "[B") \
|
||||
primarray_field(HotSpotMetaData, relocBytes, "[B") \
|
||||
primarray_field(HotSpotMetaData, exceptionBytes, "[B") \
|
||||
primarray_field(HotSpotMetaData, implicitExceptionBytes, "[B") \
|
||||
primarray_field(HotSpotMetaData, oopMaps, "[B") \
|
||||
object_field(HotSpotMetaData, metadata, "[Ljava/lang/Object;") \
|
||||
end_class \
|
||||
|
||||
@ -1339,6 +1339,18 @@ JVMCI::CodeInstallResult JVMCIRuntime::validate_compile_task_dependencies(Depend
|
||||
return JVMCI::dependencies_invalid;
|
||||
}
|
||||
|
||||
// Reports a pending exception and exits the VM.
|
||||
static void fatal_exception_in_compile(JVMCIEnv* JVMCIENV, JavaThread* thread, const char* msg) {
|
||||
// Only report a fatal JVMCI compilation exception once
|
||||
static volatile int report_init_failure = 0;
|
||||
if (!report_init_failure && Atomic::cmpxchg(1, &report_init_failure, 0) == 0) {
|
||||
tty->print_cr("%s:", msg);
|
||||
JVMCIENV->describe_pending_exception(true);
|
||||
}
|
||||
JVMCIENV->clear_pending_exception();
|
||||
before_exit(thread);
|
||||
vm_exit(-1);
|
||||
}
|
||||
|
||||
void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, const methodHandle& method, int entry_bci) {
|
||||
JVMCI_EXCEPTION_CONTEXT
|
||||
@ -1360,9 +1372,7 @@ void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, c
|
||||
HandleMark hm;
|
||||
JVMCIObject receiver = get_HotSpotJVMCIRuntime(JVMCIENV);
|
||||
if (JVMCIENV->has_pending_exception()) {
|
||||
JVMCIENV->describe_pending_exception(true);
|
||||
compile_state->set_failure(false, "exception getting HotSpotJVMCIRuntime object");
|
||||
return;
|
||||
fatal_exception_in_compile(JVMCIENV, thread, "Exception during HotSpotJVMCIRuntime initialization");
|
||||
}
|
||||
JVMCIObject jvmci_method = JVMCIENV->get_jvmci_method(method, JVMCIENV);
|
||||
if (JVMCIENV->has_pending_exception()) {
|
||||
@ -1397,16 +1407,7 @@ void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, c
|
||||
} else {
|
||||
// An uncaught exception here implies failure during compiler initialization.
|
||||
// The only sensible thing to do here is to exit the VM.
|
||||
|
||||
// Only report initialization failure once
|
||||
static volatile int report_init_failure = 0;
|
||||
if (!report_init_failure && Atomic::cmpxchg(1, &report_init_failure, 0) == 0) {
|
||||
tty->print_cr("Exception during JVMCI compiler initialization:");
|
||||
JVMCIENV->describe_pending_exception(true);
|
||||
}
|
||||
JVMCIENV->clear_pending_exception();
|
||||
before_exit((JavaThread*) THREAD);
|
||||
vm_exit(-1);
|
||||
fatal_exception_in_compile(JVMCIENV, thread, "Exception during JVMCI compiler initialization");
|
||||
}
|
||||
if (compiler->is_bootstrapping()) {
|
||||
compiler->set_bootstrap_compilation_request_handled();
|
||||
@ -1425,6 +1426,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
|
||||
int frame_words,
|
||||
OopMapSet* oop_map_set,
|
||||
ExceptionHandlerTable* handler_table,
|
||||
ImplicitExceptionTable* implicit_exception_table,
|
||||
AbstractCompiler* compiler,
|
||||
DebugInformationRecorder* debug_info,
|
||||
Dependencies* dependencies,
|
||||
@ -1494,7 +1496,6 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
|
||||
// as in C2, then it must be freed.
|
||||
//code_buffer->free_blob();
|
||||
} else {
|
||||
ImplicitExceptionTable implicit_tbl;
|
||||
nm = nmethod::new_nmethod(method,
|
||||
compile_id,
|
||||
entry_bci,
|
||||
@ -1502,7 +1503,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
|
||||
orig_pc_offset,
|
||||
debug_info, dependencies, code_buffer,
|
||||
frame_words, oop_map_set,
|
||||
handler_table, &implicit_tbl,
|
||||
handler_table, implicit_exception_table,
|
||||
compiler, comp_level,
|
||||
speculations, speculations_len,
|
||||
nmethod_mirror_index, nmethod_mirror_name, failed_speculations);
|
||||
|
||||
@ -222,6 +222,7 @@ class JVMCIRuntime: public CHeapObj<mtJVMCI> {
|
||||
int frame_words,
|
||||
OopMapSet* oop_map_set,
|
||||
ExceptionHandlerTable* handler_table,
|
||||
ImplicitExceptionTable* implicit_exception_table,
|
||||
AbstractCompiler* compiler,
|
||||
DebugInformationRecorder* debug_info,
|
||||
Dependencies* dependencies,
|
||||
|
||||
@ -1080,7 +1080,7 @@ ReservedSpace FileMapInfo::reserve_shared_memory() {
|
||||
}
|
||||
|
||||
// Memory map a region in the address space.
|
||||
static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", "OptionalData",
|
||||
static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode",
|
||||
"String1", "String2", "OpenArchive1", "OpenArchive2" };
|
||||
|
||||
char* FileMapInfo::map_regions(int regions[], char* saved_base[], size_t len) {
|
||||
@ -1094,7 +1094,7 @@ char* FileMapInfo::map_regions(int regions[], char* saved_base[], size_t len) {
|
||||
return NULL;
|
||||
}
|
||||
if (i > 0) {
|
||||
// We require that mc->rw->ro->md->od to be laid out consecutively, with no
|
||||
// We require that mc->rw->ro->md to be laid out consecutively, with no
|
||||
// gaps between them. That way, we can ensure that the OS won't be able to
|
||||
// allocate any new memory spaces inside _shared_metaspace_{base,top}, which
|
||||
// would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace().
|
||||
|
||||
@ -45,7 +45,7 @@ void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref) {
|
||||
}
|
||||
|
||||
void MetaspaceClosure::do_push(MetaspaceClosure::Ref* ref) {
|
||||
if (ref->not_null()) { // FIXME: make this configurable, so DynamicArchiveBuilder mark all pointers
|
||||
if (ref->not_null()) {
|
||||
bool read_only;
|
||||
Writability w = ref->writability();
|
||||
switch (w) {
|
||||
|
||||
@ -90,18 +90,17 @@ void* MetaspaceShared::_shared_metaspace_static_top = NULL;
// rw - read-write metadata
// ro - read-only metadata and read-only tables
// md - misc data (the c++ vtables)
// od - optional data (original class files)
//
// ca0 - closed archive heap space #0
// ca1 - closed archive heap space #1 (may be empty)
// oa0 - open archive heap space #0
// oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, md and od regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md->od. The size of these 5 regions
// The mc, rw, ro, and md regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md. The size of these 4 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 5 regions are populated in the following steps:
// These 4 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
// temporarily allocated outside of the shared regions. Only the method entry
// trampolines are written into the mc region.
@ -110,10 +109,9 @@ void* MetaspaceShared::_shared_metaspace_static_top = NULL;
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
// are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
// [6] Original class files are copied into the od region.
//
// The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 5 regions.
// Their layout is independent of the other 4 regions.

char* DumpRegion::expand_top_to(char* newtop) {
assert(is_allocatable(), "must be initialized and not packed");
@ -174,7 +172,7 @@ void DumpRegion::pack(DumpRegion* next) {
|
||||
}
|
||||
}
|
||||
|
||||
DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
|
||||
DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
|
||||
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
|
||||
|
||||
void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
|
||||
@ -198,10 +196,6 @@ DumpRegion* MetaspaceShared::read_only_dump_space() {
|
||||
return &_ro_region;
|
||||
}
|
||||
|
||||
DumpRegion* MetaspaceShared::optional_data_dump_space() {
|
||||
return &_od_region;
|
||||
}
|
||||
|
||||
void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
|
||||
ReservedSpace* rs) {
|
||||
current->pack(next);
|
||||
@ -290,10 +284,10 @@ void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
|
||||
//
|
||||
// +-- SharedBaseAddress (default = 0x800000000)
|
||||
// v
|
||||
// +-..---------+---------+ ... +----+----+----+----+----+---------------+
|
||||
// | Heap | Archive | | MC | RW | RO | MD | OD | class space |
|
||||
// +-..---------+---------+ ... +----+----+----+----+----+---------------+
|
||||
// |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB ------->|
|
||||
// +-..---------+---------+ ... +----+----+----+----+---------------+
|
||||
// | Heap | Archive | | MC | RW | RO | MD | class space |
|
||||
// +-..---------+---------+ ... +----+----+----+----+---------------+
|
||||
// |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB -->|
|
||||
//
|
||||
const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
|
||||
const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
|
||||
@ -1074,7 +1068,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all)
|
||||
|
||||
LogMessage(cds) msg;
|
||||
|
||||
msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):");
|
||||
msg.info("Detailed metadata info (excluding st regions; rw stats include md/mc regions):");
|
||||
msg.info("%s", hdr);
|
||||
msg.info("%s", sep);
|
||||
for (int type = 0; type < int(_number_of_types); type ++) {
|
||||
|
||||
@ -304,7 +304,6 @@ class MetaspaceShared : AllStatic {
|
||||
static DumpRegion* misc_code_dump_space();
|
||||
static DumpRegion* read_write_dump_space();
|
||||
static DumpRegion* read_only_dump_space();
|
||||
static DumpRegion* optional_data_dump_space();
|
||||
static void pack_dump_space(DumpRegion* current, DumpRegion* next,
|
||||
ReservedSpace* rs);
|
||||
|
||||
|
||||
@ -196,7 +196,10 @@ macro(LoadS)
|
||||
#endif
|
||||
zgcmacro(LoadBarrier)
|
||||
zgcmacro(LoadBarrierSlowReg)
|
||||
zgcmacro(LoadBarrierWeakSlowReg)
|
||||
zgcmacro(ZCompareAndSwapP)
|
||||
zgcmacro(ZWeakCompareAndSwapP)
|
||||
zgcmacro(ZCompareAndExchangeP)
|
||||
zgcmacro(ZGetAndSetP)
|
||||
macro(Lock)
|
||||
macro(Loop)
|
||||
macro(LoopLimit)
|
||||
|
||||
@ -2211,8 +2211,8 @@ void Compile::Optimize() {
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef ASSERT
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
#ifdef ASSERT
|
||||
bs->verify_gc_barriers(this, BarrierSetC2::BeforeOptimize);
|
||||
#endif
|
||||
|
||||
@ -2371,7 +2371,6 @@ void Compile::Optimize() {
|
||||
igvn = ccp;
|
||||
igvn.optimize();
|
||||
}
|
||||
|
||||
print_method(PHASE_ITER_GVN2, 2);
|
||||
|
||||
if (failing()) return;
|
||||
@ -2382,12 +2381,6 @@ void Compile::Optimize() {
|
||||
return;
|
||||
}
|
||||
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC) {
|
||||
ZBarrierSetC2::find_dominating_barriers(igvn);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (failing()) return;
|
||||
|
||||
// Ensure that major progress is now clear
|
||||
@ -2407,28 +2400,33 @@ void Compile::Optimize() {
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
bs->verify_gc_barriers(this, BarrierSetC2::BeforeExpand);
|
||||
bs->verify_gc_barriers(this, BarrierSetC2::BeforeLateInsertion);
|
||||
#endif
|
||||
|
||||
bs->barrier_insertion_phase(C, igvn);
|
||||
if (failing()) return;
|
||||
|
||||
#ifdef ASSERT
|
||||
bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
|
||||
#endif
|
||||
|
||||
{
|
||||
TracePhase tp("macroExpand", &timers[_t_macroExpand]);
|
||||
PhaseMacroExpand mex(igvn);
|
||||
print_method(PHASE_BEFORE_MACRO_EXPANSION, 2);
|
||||
if (mex.expand_macro_nodes()) {
|
||||
assert(failing(), "must bail out w/ explicit message");
|
||||
return;
|
||||
}
|
||||
print_method(PHASE_MACRO_EXPANSION, 2);
|
||||
}
|
||||
|
||||
{
|
||||
TracePhase tp("barrierExpand", &timers[_t_barrierExpand]);
|
||||
print_method(PHASE_BEFORE_BARRIER_EXPAND, 2);
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
if (bs->expand_barriers(this, igvn)) {
|
||||
assert(failing(), "must bail out w/ explicit message");
|
||||
return;
|
||||
}
|
||||
print_method(PHASE_BARRIER_EXPANSION, 2);
|
||||
}
|
||||
|
||||
if (opaque4_count() > 0) {
|
||||
@ -2824,7 +2822,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
|
||||
MemBarNode* mb = n->as_MemBar();
|
||||
if (mb->trailing_store() || mb->trailing_load_store()) {
|
||||
assert(mb->leading_membar()->trailing_membar() == mb, "bad membar pair");
|
||||
Node* mem = mb->in(MemBarNode::Precedent);
|
||||
Node* mem = BarrierSet::barrier_set()->barrier_set_c2()->step_over_gc_barrier(mb->in(MemBarNode::Precedent));
|
||||
assert((mb->trailing_store() && mem->is_Store() && mem->as_Store()->is_release()) ||
|
||||
(mb->trailing_load_store() && mem->is_LoadStore()), "missing mem op");
|
||||
} else if (mb->leading()) {
|
||||
|
||||
@ -52,6 +52,7 @@ class C2Compiler;
|
||||
class CallGenerator;
|
||||
class CloneMap;
|
||||
class ConnectionGraph;
|
||||
class IdealGraphPrinter;
|
||||
class InlineTree;
|
||||
class Int_Array;
|
||||
class LoadBarrierNode;
|
||||
@ -95,9 +96,9 @@ enum LoopOptsMode {
|
||||
LoopOptsNone,
|
||||
LoopOptsShenandoahExpand,
|
||||
LoopOptsShenandoahPostExpand,
|
||||
LoopOptsZBarrierInsertion,
|
||||
LoopOptsSkipSplitIf,
|
||||
LoopOptsVerify,
|
||||
LoopOptsLastRound
|
||||
LoopOptsVerify
|
||||
};
|
||||
|
||||
typedef unsigned int node_idx_t;
|
||||
@ -658,6 +659,7 @@ class Compile : public Phase {
|
||||
void set_do_cleanup(bool z) { _do_cleanup = z; }
|
||||
int do_cleanup() const { return _do_cleanup; }
|
||||
void set_major_progress() { _major_progress++; }
|
||||
void restore_major_progress(int progress) { _major_progress += progress; }
|
||||
void clear_major_progress() { _major_progress = 0; }
|
||||
int max_inline_size() const { return _max_inline_size; }
|
||||
void set_freq_inline_size(int n) { _freq_inline_size = n; }
|
||||
@ -747,7 +749,15 @@ class Compile : public Phase {
|
||||
C->_latest_stage_start_counter.stamp();
|
||||
}
|
||||
|
||||
void print_method(CompilerPhaseType cpt, int level = 1) {
|
||||
bool should_print(int level = 1) {
|
||||
#ifndef PRODUCT
|
||||
return (_printer && _printer->should_print(level));
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
void print_method(CompilerPhaseType cpt, int level = 1, int idx = 0) {
|
||||
EventCompilerPhase event;
|
||||
if (event.should_commit()) {
|
||||
event.set_starttime(C->_latest_stage_start_counter);
|
||||
@ -757,10 +767,15 @@ class Compile : public Phase {
|
||||
event.commit();
|
||||
}
|
||||
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (_printer && _printer->should_print(level)) {
|
||||
_printer->print_method(CompilerPhaseTypeHelper::to_string(cpt), level);
|
||||
if (should_print(level)) {
|
||||
char output[1024];
|
||||
if (idx != 0) {
|
||||
sprintf(output, "%s:%d", CompilerPhaseTypeHelper::to_string(cpt), idx);
|
||||
} else {
|
||||
sprintf(output, "%s", CompilerPhaseTypeHelper::to_string(cpt));
|
||||
}
|
||||
_printer->print_method(output, level);
|
||||
}
|
||||
#endif
|
||||
C->_latest_stage_start_counter.stamp();
|
||||
|
||||
@ -350,14 +350,6 @@ void IdealGraphPrinter::end_method() {
|
||||
_xml->flush();
|
||||
}
|
||||
|
||||
// Print indent
|
||||
void IdealGraphPrinter::print_indent() {
|
||||
tty->print_cr("printing indent %d", _depth);
|
||||
for (int i = 0; i < _depth; i++) {
|
||||
_xml->print("%s", INDENT);
|
||||
}
|
||||
}
|
||||
|
||||
bool IdealGraphPrinter::traverse_outs() {
|
||||
return _traverse_outs;
|
||||
}
|
||||
@ -663,14 +655,16 @@ void IdealGraphPrinter::walk_nodes(Node *start, bool edges, VectorSet* temp_set)
|
||||
}
|
||||
}
|
||||
|
||||
void IdealGraphPrinter::print_method(const char *name, int level, bool clear_nodes) {
|
||||
print(name, (Node *)C->root(), level, clear_nodes);
|
||||
void IdealGraphPrinter::print_method(const char *name, int level) {
|
||||
if (should_print(level)) {
|
||||
print(name, (Node *) C->root());
|
||||
}
|
||||
}
|
||||
|
||||
// Print current ideal graph
|
||||
void IdealGraphPrinter::print(const char *name, Node *node, int level, bool clear_nodes) {
|
||||
void IdealGraphPrinter::print(const char *name, Node *node) {
|
||||
|
||||
if (!_current_method || !_should_send_method || !should_print(level)) return;
|
||||
if (!_current_method || !_should_send_method) return;
|
||||
|
||||
// Warning, unsafe cast?
|
||||
_chaitin = (PhaseChaitin *)C->regalloc();
|
||||
|
||||
@ -81,11 +81,7 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
|
||||
static const char *METHOD_SHORT_NAME_PROPERTY;
|
||||
static const char *ASSEMBLY_ELEMENT;
|
||||
|
||||
elapsedTimer _walk_time;
|
||||
elapsedTimer _output_time;
|
||||
elapsedTimer _build_blocks_time;
|
||||
|
||||
static int _file_count;
|
||||
static int _file_count;
|
||||
networkStream *_stream;
|
||||
xmlStream *_xml;
|
||||
outputStream *_output;
|
||||
@ -97,10 +93,6 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
|
||||
bool _traverse_outs;
|
||||
Compile *C;
|
||||
|
||||
static void pre_node(Node* node, void *env);
|
||||
static void post_node(Node* node, void *env);
|
||||
|
||||
void print_indent();
|
||||
void print_method(ciMethod *method, int bci, InlineTree *tree);
|
||||
void print_inline_tree(InlineTree *tree);
|
||||
void visit_node(Node *n, bool edges, VectorSet* temp_set);
|
||||
@ -116,7 +108,6 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
|
||||
void tail(const char *name);
|
||||
void head(const char *name);
|
||||
void text(const char *s);
|
||||
intptr_t get_node_id(Node *n);
|
||||
IdealGraphPrinter();
|
||||
~IdealGraphPrinter();
|
||||
|
||||
@ -130,9 +121,8 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
|
||||
void print_inlining();
|
||||
void begin_method();
|
||||
void end_method();
|
||||
void print_method(const char *name, int level=1, bool clear_nodes = false);
|
||||
void print(const char *name, Node *root, int level=1, bool clear_nodes = false);
|
||||
void print_xml(const char *name);
|
||||
void print_method(const char *name, int level = 0);
|
||||
void print(const char *name, Node *root);
|
||||
bool should_print(int level);
|
||||
void set_compile(Compile* compile) {C = compile; }
|
||||
};
|
||||
|
||||
@ -171,7 +171,6 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
|
||||
case Op_LoadL:
|
||||
case Op_LoadP:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
case Op_LoadBarrierWeakSlowReg:
|
||||
case Op_LoadN:
|
||||
case Op_LoadS:
|
||||
case Op_LoadKlass:
|
||||
|
||||
@ -978,7 +978,7 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const {
|
||||
wq.push(u);
|
||||
bool found_sfpt = false;
|
||||
for (uint next = 0; next < wq.size() && !found_sfpt; next++) {
|
||||
Node *n = wq.at(next);
|
||||
Node* n = wq.at(next);
|
||||
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) {
|
||||
Node* u = n->fast_out(i);
|
||||
if (u == sfpt) {
|
||||
@ -992,6 +992,19 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const {
|
||||
assert(found_sfpt, "no node in loop that's not input to safepoint");
|
||||
}
|
||||
}
|
||||
|
||||
if (UseZGC && !inner_out->in(0)->is_CountedLoopEnd()) {
|
||||
// In some very special cases there can be a load that has no other uses than the
|
||||
// counted loop safepoint. Then its loadbarrier will be placed between the inner
|
||||
// loop exit and the safepoint. This is very rare
|
||||
|
||||
Node* ifnode = inner_out->in(1)->in(0);
|
||||
// Region->IfTrue->If == Region->Iffalse->If
|
||||
if (ifnode == inner_out->in(2)->in(0)) {
|
||||
inner_out = ifnode->in(0);
|
||||
}
|
||||
}
|
||||
|
||||
CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
|
||||
assert(cle == inner->loopexit_or_null(), "mismatch");
|
||||
bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
|
||||
@ -2761,7 +2774,7 @@ bool PhaseIdealLoop::process_expensive_nodes() {
|
||||
// Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to
|
||||
// its corresponding LoopNode. If 'optimize' is true, do some loop cleanups.
|
||||
void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
|
||||
bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
|
||||
bool do_split_ifs = (mode == LoopOptsDefault);
|
||||
bool skip_loop_opts = (mode == LoopOptsNone);
|
||||
|
||||
int old_progress = C->major_progress();
|
||||
@ -2921,9 +2934,7 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
|
||||
build_loop_late( visited, worklist, nstack );
|
||||
|
||||
if (_verify_only) {
|
||||
// restore major progress flag
|
||||
for (int i = 0; i < old_progress; i++)
|
||||
C->set_major_progress();
|
||||
C->restore_major_progress(old_progress);
|
||||
assert(C->unique() == unique, "verification mode made Nodes? ? ?");
|
||||
assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
|
||||
return;
|
||||
@ -2967,9 +2978,7 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
|
||||
|
||||
if (skip_loop_opts) {
|
||||
// restore major progress flag
|
||||
for (int i = 0; i < old_progress; i++) {
|
||||
C->set_major_progress();
|
||||
}
|
||||
C->restore_major_progress(old_progress);
|
||||
|
||||
// Cleanup any modified bits
|
||||
_igvn.optimize();
|
||||
@ -3018,11 +3027,8 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
|
||||
// that require basic-block info (like cloning through Phi's)
|
||||
if( SplitIfBlocks && do_split_ifs ) {
|
||||
visited.Clear();
|
||||
split_if_with_blocks( visited, nstack, mode == LoopOptsLastRound );
|
||||
split_if_with_blocks( visited, nstack);
|
||||
NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); );
|
||||
if (mode == LoopOptsLastRound) {
|
||||
C->set_major_progress();
|
||||
}
|
||||
}
|
||||
|
||||
if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) {
|
||||
@ -3157,8 +3163,7 @@ void PhaseIdealLoop::verify() const {
|
||||
_ltree_root->verify_tree(loop_verify._ltree_root, NULL);
|
||||
// Reset major-progress. It was cleared by creating a verify version of
|
||||
// PhaseIdealLoop.
|
||||
for( int i=0; i<old_progress; i++ )
|
||||
C->set_major_progress();
|
||||
C->restore_major_progress(old_progress);
|
||||
}
|
||||
|
||||
//------------------------------verify_compare---------------------------------
|
||||
@ -4288,7 +4293,6 @@ void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) {
|
||||
case Op_LoadS:
|
||||
case Op_LoadP:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
case Op_LoadBarrierWeakSlowReg:
|
||||
case Op_LoadN:
|
||||
case Op_LoadRange:
|
||||
case Op_LoadD_unaligned:
|
||||
|
||||
@ -824,6 +824,7 @@ public:
|
||||
// pull such a subsumed block out of the array, we write back the final
|
||||
// correct block.
|
||||
Node *get_ctrl( Node *i ) {
|
||||
|
||||
assert(has_node(i), "");
|
||||
Node *n = get_ctrl_no_update(i);
|
||||
_nodes.map( i->_idx, (Node*)((intptr_t)n + 1) );
|
||||
@ -1306,9 +1307,9 @@ public:
|
||||
|
||||
// Check for aggressive application of 'split-if' optimization,
|
||||
// using basic block level info.
|
||||
void split_if_with_blocks ( VectorSet &visited, Node_Stack &nstack, bool last_round );
|
||||
void split_if_with_blocks ( VectorSet &visited, Node_Stack &nstack);
|
||||
Node *split_if_with_blocks_pre ( Node *n );
|
||||
void split_if_with_blocks_post( Node *n, bool last_round );
|
||||
void split_if_with_blocks_post( Node *n );
|
||||
Node *has_local_phi_input( Node *n );
|
||||
// Mark an IfNode as being dominated by a prior test,
|
||||
// without actually altering the CFG (and hence IDOM info).
|
||||
|
||||
@ -1195,11 +1195,11 @@ bool PhaseIdealLoop::can_split_if(Node* n_ctrl) {
|
||||
// Do the real work in a non-recursive function. CFG hackery wants to be
|
||||
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
|
||||
// info.
|
||||
void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
|
||||
void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {
|
||||
|
||||
// Cloning Cmp through Phi's involves the split-if transform.
|
||||
// FastLock is not used by an If
|
||||
if (n->is_Cmp() && !n->is_FastLock() && !last_round) {
|
||||
if (n->is_Cmp() && !n->is_FastLock()) {
|
||||
Node *n_ctrl = get_ctrl(n);
|
||||
// Determine if the Node has inputs from some local Phi.
|
||||
// Returns the block to clone thru.
|
||||
@ -1451,18 +1451,12 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
|
||||
get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
|
||||
_igvn.replace_node( n, n->in(1) );
|
||||
}
|
||||
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC) {
|
||||
ZBarrierSetC2::loop_optimize_gc_barrier(this, n, last_round);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
//------------------------------split_if_with_blocks---------------------------
|
||||
// Check for aggressive application of 'split-if' optimization,
|
||||
// using basic block level info.
|
||||
void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack, bool last_round) {
|
||||
void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack) {
|
||||
Node* root = C->root();
|
||||
visited.set(root->_idx); // first, mark root as visited
|
||||
// Do pre-visit work for root
|
||||
@ -1488,7 +1482,7 @@ void PhaseIdealLoop::split_if_with_blocks(VectorSet &visited, Node_Stack &nstack
|
||||
// All of n's children have been processed, complete post-processing.
|
||||
if (cnt != 0 && !n->is_Con()) {
|
||||
assert(has_node(n), "no dead nodes");
|
||||
split_if_with_blocks_post(n, last_round);
|
||||
split_if_with_blocks_post(n);
|
||||
}
|
||||
if (must_throttle_split_if()) {
|
||||
nstack.clear();
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -908,14 +908,6 @@ static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp,
|
||||
// a load node that reads from the source array so we may be able to
|
||||
// optimize out the ArrayCopy node later.
|
||||
Node* LoadNode::can_see_arraycopy_value(Node* st, PhaseGVN* phase) const {
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC) {
|
||||
if (bottom_type()->make_oopptr() != NULL) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
Node* ld_adr = in(MemNode::Address);
|
||||
intptr_t ld_off = 0;
|
||||
AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
|
||||
@ -2811,7 +2803,8 @@ const Type* SCMemProjNode::Value(PhaseGVN* phase) const
|
||||
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
|
||||
: Node(required),
|
||||
_type(rt),
|
||||
_adr_type(at)
|
||||
_adr_type(at),
|
||||
_has_barrier(false)
|
||||
{
|
||||
init_req(MemNode::Control, c );
|
||||
init_req(MemNode::Memory , mem);
|
||||
@ -3105,16 +3098,6 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC) {
|
||||
if (req() == (Precedent+1) && in(MemBarNode::Precedent)->in(0) != NULL && in(MemBarNode::Precedent)->in(0)->is_LoadBarrier()) {
|
||||
Node* load_node = in(MemBarNode::Precedent)->in(0)->in(LoadBarrierNode::Oop);
|
||||
set_req(MemBarNode::Precedent, load_node);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
bool progress = false;
|
||||
// Eliminate volatile MemBars for scalar replaced objects.
|
||||
if (can_reshape && req() == (Precedent+1)) {
|
||||
|
||||
@ -164,6 +164,7 @@ public:
|
||||
Pinned,
|
||||
DependsOnlyOnTest
|
||||
};
|
||||
|
||||
private:
|
||||
// LoadNode::hash() doesn't take the _control_dependency field
|
||||
// into account: If the graph already has a non-pinned LoadNode and
|
||||
@ -182,6 +183,8 @@ private:
|
||||
// this field.
|
||||
const MemOrd _mo;
|
||||
|
||||
uint _barrier; // Bit field with barrier information
|
||||
|
||||
protected:
|
||||
virtual bool cmp(const Node &n) const;
|
||||
virtual uint size_of() const; // Size is bigger
|
||||
@ -193,7 +196,7 @@ protected:
|
||||
public:
|
||||
|
||||
LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
|
||||
: MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _type(rt) {
|
||||
: MemNode(c,mem,adr,at), _control_dependency(control_dependency), _mo(mo), _barrier(0), _type(rt) {
|
||||
init_class_id(Class_Load);
|
||||
}
|
||||
inline bool is_unordered() const { return !is_acquire(); }
|
||||
@ -262,6 +265,10 @@ public:
|
||||
Node* convert_to_unsigned_load(PhaseGVN& gvn);
|
||||
Node* convert_to_signed_load(PhaseGVN& gvn);
|
||||
|
||||
void copy_barrier_info(const Node* src) { _barrier = src->as_Load()->_barrier; }
|
||||
uint barrier_data() { return _barrier; }
|
||||
void set_barrier_data(uint barrier_data) { _barrier |= barrier_data; }
|
||||
|
||||
#ifndef PRODUCT
|
||||
virtual void dump_spec(outputStream *st) const;
|
||||
#endif
|
||||
@ -810,6 +817,7 @@ class LoadStoreNode : public Node {
|
||||
private:
|
||||
const Type* const _type; // What kind of value is loaded?
|
||||
const TypePtr* _adr_type; // What kind of memory is being addressed?
|
||||
bool _has_barrier;
|
||||
virtual uint size_of() const; // Size is bigger
|
||||
public:
|
||||
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
|
||||
@ -822,6 +830,8 @@ public:
|
||||
|
||||
bool result_not_used() const;
|
||||
MemBarNode* trailing_membar() const;
|
||||
void set_has_barrier() { _has_barrier = true; };
|
||||
bool has_barrier() const { return _has_barrier; };
|
||||
};
|
||||
|
||||
class LoadStoreConditionalNode : public LoadStoreNode {
|
||||
|
||||
@ -546,6 +546,9 @@ Node *Node::clone() const {
|
||||
if (n->is_SafePoint()) {
|
||||
n->as_SafePoint()->clone_replaced_nodes();
|
||||
}
|
||||
if (n->is_Load()) {
|
||||
n->as_Load()->copy_barrier_info(this);
|
||||
}
|
||||
return n; // Return the clone
|
||||
}
|
||||
|
||||
@ -564,7 +567,6 @@ void Node::setup_is_top() {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//------------------------------~Node------------------------------------------
|
||||
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
|
||||
void Node::destruct() {
|
||||
@ -1454,13 +1456,16 @@ bool Node::rematerialize() const {
|
||||
//------------------------------needs_anti_dependence_check---------------------
|
||||
// Nodes which use memory without consuming it, hence need antidependences.
|
||||
bool Node::needs_anti_dependence_check() const {
|
||||
if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
|
||||
if (req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0) {
|
||||
return false;
|
||||
else
|
||||
return in(1)->bottom_type()->has_memory();
|
||||
}
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
if (!bs->needs_anti_dependence_check(this)) {
|
||||
return false;
|
||||
}
|
||||
return in(1)->bottom_type()->has_memory();
|
||||
}
|
||||
|
||||
|
||||
// Get an integer constant from a ConNode (or CastIINode).
|
||||
// Return a default value if there is no apparent constant here.
|
||||
const TypeInt* Node::find_int_type() const {
|
||||
|
||||
@ -83,8 +83,8 @@ class JumpProjNode;
|
||||
class LoadNode;
|
||||
class LoadBarrierNode;
|
||||
class LoadBarrierSlowRegNode;
|
||||
class LoadBarrierWeakSlowRegNode;
|
||||
class LoadStoreNode;
|
||||
class LoadStoreConditionalNode;
|
||||
class LockNode;
|
||||
class LoopNode;
|
||||
class MachBranchNode;
|
||||
@ -688,8 +688,7 @@ public:
|
||||
DEFINE_CLASS_ID(Mem, Node, 4)
|
||||
DEFINE_CLASS_ID(Load, Mem, 0)
|
||||
DEFINE_CLASS_ID(LoadVector, Load, 0)
|
||||
DEFINE_CLASS_ID(LoadBarrierSlowReg, Load, 1)
|
||||
DEFINE_CLASS_ID(LoadBarrierWeakSlowReg, Load, 2)
|
||||
DEFINE_CLASS_ID(LoadBarrierSlowReg, Load, 1)
|
||||
DEFINE_CLASS_ID(Store, Mem, 1)
|
||||
DEFINE_CLASS_ID(StoreVector, Store, 0)
|
||||
DEFINE_CLASS_ID(LoadStore, Mem, 2)
|
||||
@ -830,9 +829,9 @@ public:
|
||||
DEFINE_CLASS_QUERY(JumpProj)
|
||||
DEFINE_CLASS_QUERY(Load)
|
||||
DEFINE_CLASS_QUERY(LoadStore)
|
||||
DEFINE_CLASS_QUERY(LoadStoreConditional)
|
||||
DEFINE_CLASS_QUERY(LoadBarrier)
|
||||
DEFINE_CLASS_QUERY(LoadBarrierSlowReg)
|
||||
DEFINE_CLASS_QUERY(LoadBarrierWeakSlowReg)
|
||||
DEFINE_CLASS_QUERY(Lock)
|
||||
DEFINE_CLASS_QUERY(Loop)
|
||||
DEFINE_CLASS_QUERY(Mach)
|
||||
|
||||
@ -1003,9 +1003,6 @@ PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
|
||||
n->is_Mem() )
|
||||
add_users_to_worklist(n);
|
||||
}
|
||||
|
||||
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
|
||||
bs->add_users_to_worklist(&_worklist);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -52,8 +52,11 @@ enum CompilerPhaseType {
|
||||
PHASE_MATCHING,
|
||||
PHASE_INCREMENTAL_INLINE,
|
||||
PHASE_INCREMENTAL_BOXING_INLINE,
|
||||
PHASE_BEFORE_BARRIER_EXPAND,
|
||||
PHASE_BEFORE_MACRO_EXPANSION,
|
||||
PHASE_CALL_CATCH_CLEANUP,
|
||||
PHASE_INSERT_BARRIER,
|
||||
PHASE_MACRO_EXPANSION,
|
||||
PHASE_BARRIER_EXPANSION,
|
||||
PHASE_ADD_UNSAFE_BARRIER,
|
||||
PHASE_END,
|
||||
PHASE_FAILURE,
|
||||
|
||||
@ -90,8 +93,11 @@ class CompilerPhaseTypeHelper {
|
||||
case PHASE_MATCHING: return "After matching";
|
||||
case PHASE_INCREMENTAL_INLINE: return "Incremental Inline";
|
||||
case PHASE_INCREMENTAL_BOXING_INLINE: return "Incremental Boxing Inline";
|
||||
case PHASE_BEFORE_BARRIER_EXPAND: return "Before Barrier Expand";
|
||||
case PHASE_BEFORE_MACRO_EXPANSION: return "Before macro expansion";
|
||||
case PHASE_CALL_CATCH_CLEANUP: return "Call catch cleanup";
|
||||
case PHASE_INSERT_BARRIER: return "Insert barrier";
|
||||
case PHASE_MACRO_EXPANSION: return "Macro expand";
|
||||
case PHASE_BARRIER_EXPANSION: return "Barrier expand";
|
||||
case PHASE_ADD_UNSAFE_BARRIER: return "Add barrier to unsafe op";
|
||||
case PHASE_END: return "End";
|
||||
case PHASE_FAILURE: return "Failure";
|
||||
default:
|
||||
|
||||
@ -298,7 +298,6 @@ void VectorNode::vector_operands(Node* n, uint* start, uint* end) {
|
||||
case Op_LoadF: case Op_LoadD:
|
||||
case Op_LoadP: case Op_LoadN:
|
||||
case Op_LoadBarrierSlowReg:
|
||||
case Op_LoadBarrierWeakSlowReg:
|
||||
*start = 0;
|
||||
*end = 0; // no vector operands
|
||||
break;
|
||||
|
||||
@ -1630,8 +1630,8 @@ void Arguments::set_use_compressed_oops() {
|
||||
#ifdef _LP64
|
||||
// MaxHeapSize is not set up properly at this point, but
|
||||
// the only value that can override MaxHeapSize if we are
|
||||
// to use UseCompressedOops is InitialHeapSize.
|
||||
size_t max_heap_size = MAX2(MaxHeapSize, InitialHeapSize);
|
||||
// to use UseCompressedOops are InitialHeapSize and MinHeapSize.
|
||||
size_t max_heap_size = MAX3(MaxHeapSize, InitialHeapSize, MinHeapSize);
|
||||
|
||||
if (max_heap_size <= max_heap_for_compressed_oops()) {
|
||||
#if !defined(COMPILER1) || defined(TIERED)
|
||||
@ -1832,6 +1832,8 @@ void Arguments::set_heap_size() {
|
||||
// after call to limit_by_allocatable_memory because that
|
||||
// method might reduce the allocation size.
|
||||
reasonable_max = MAX2(reasonable_max, (julong)InitialHeapSize);
|
||||
} else if (!FLAG_IS_DEFAULT(MinHeapSize)) {
|
||||
reasonable_max = MAX2(reasonable_max, (julong)MinHeapSize);
|
||||
}
|
||||
|
||||
log_trace(gc, heap)(" Maximum heap size " SIZE_FORMAT, (size_t) reasonable_max);
|
||||
@ -1855,13 +1857,13 @@ void Arguments::set_heap_size() {
|
||||
|
||||
reasonable_initial = limit_by_allocatable_memory(reasonable_initial);
|
||||
|
||||
log_trace(gc, heap)(" Initial heap size " SIZE_FORMAT, (size_t)reasonable_initial);
|
||||
FLAG_SET_ERGO(InitialHeapSize, (size_t)reasonable_initial);
|
||||
log_trace(gc, heap)(" Initial heap size " SIZE_FORMAT, InitialHeapSize);
|
||||
}
|
||||
// If the minimum heap size has not been set (via -Xms),
|
||||
// If the minimum heap size has not been set (via -Xms or -XX:MinHeapSize),
|
||||
// synchronize with InitialHeapSize to avoid errors with the default value.
|
||||
if (MinHeapSize == 0) {
|
||||
MinHeapSize = MIN2((size_t)reasonable_minimum, InitialHeapSize);
|
||||
FLAG_SET_ERGO(MinHeapSize, MIN2((size_t)reasonable_minimum, InitialHeapSize));
|
||||
log_trace(gc, heap)(" Minimum heap size " SIZE_FORMAT, MinHeapSize);
|
||||
}
|
||||
}
|
||||
@ -1903,8 +1905,9 @@ jint Arguments::set_aggressive_heap_flags() {
|
||||
if (FLAG_SET_CMDLINE(InitialHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
|
||||
return JNI_EINVAL;
|
||||
}
|
||||
// Currently the minimum size and the initial heap sizes are the same.
|
||||
MinHeapSize = initHeapSize;
|
||||
if (FLAG_SET_CMDLINE(MinHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
|
||||
return JNI_EINVAL;
|
||||
}
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(NewSize)) {
|
||||
// Make the young generation 3/8ths of the total heap.
|
||||
@ -2595,19 +2598,19 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
|
||||
}
|
||||
// -Xms
|
||||
} else if (match_option(option, "-Xms", &tail)) {
|
||||
julong long_initial_heap_size = 0;
|
||||
julong size = 0;
|
||||
// an initial heap size of 0 means automatically determine
|
||||
ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 0);
|
||||
ArgsRange errcode = parse_memory_size(tail, &size, 0);
|
||||
if (errcode != arg_in_range) {
|
||||
jio_fprintf(defaultStream::error_stream(),
|
||||
"Invalid initial heap size: %s\n", option->optionString);
|
||||
describe_range_error(errcode);
|
||||
return JNI_EINVAL;
|
||||
}
|
||||
MinHeapSize = (size_t)long_initial_heap_size;
|
||||
// Currently the minimum size and the initial heap sizes are the same.
|
||||
// Can be overridden with -XX:InitialHeapSize.
|
||||
if (FLAG_SET_CMDLINE(InitialHeapSize, (size_t)long_initial_heap_size) != JVMFlag::SUCCESS) {
|
||||
if (FLAG_SET_CMDLINE(MinHeapSize, (size_t)size) != JVMFlag::SUCCESS) {
|
||||
return JNI_EINVAL;
|
||||
}
|
||||
if (FLAG_SET_CMDLINE(InitialHeapSize, (size_t)size) != JVMFlag::SUCCESS) {
|
||||
return JNI_EINVAL;
|
||||
}
|
||||
// -Xmx
|
||||
|
||||
@ -310,7 +310,7 @@ void print_statistics() {
|
||||
// CodeHeap State Analytics.
|
||||
// Does also call NMethodSweeper::print(tty)
|
||||
if (PrintCodeHeapAnalytics) {
|
||||
CompileBroker::print_heapinfo(NULL, "all", "4096"); // details
|
||||
CompileBroker::print_heapinfo(NULL, "all", 4096); // details
|
||||
} else if (PrintMethodFlushingStatistics) {
|
||||
NMethodSweeper::print(tty);
|
||||
}
|
||||
@ -378,7 +378,7 @@ void print_statistics() {
|
||||
// CodeHeap State Analytics.
|
||||
// Does also call NMethodSweeper::print(tty)
|
||||
if (PrintCodeHeapAnalytics) {
|
||||
CompileBroker::print_heapinfo(NULL, "all", "4096"); // details
|
||||
CompileBroker::print_heapinfo(NULL, "all", 4096); // details
|
||||
} else if (PrintMethodFlushingStatistics) {
|
||||
NMethodSweeper::print(tty);
|
||||
}
|
||||
|
||||
@ -764,18 +764,9 @@ void SharedRuntime::throw_StackOverflowError_common(JavaThread* thread, bool del
|
||||
throw_and_post_jvmti_exception(thread, exception);
|
||||
}
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
address SharedRuntime::deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason) {
|
||||
assert(deopt_reason > Deoptimization::Reason_none && deopt_reason < Deoptimization::Reason_LIMIT, "invalid deopt reason");
|
||||
thread->set_jvmci_implicit_exception_pc(pc);
|
||||
thread->set_pending_deoptimization(Deoptimization::make_trap_request((Deoptimization::DeoptReason)deopt_reason, Deoptimization::Action_reinterpret));
|
||||
return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
|
||||
}
|
||||
#endif // INCLUDE_JVMCI
|
||||
|
||||
address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
|
||||
address pc,
|
||||
SharedRuntime::ImplicitExceptionKind exception_kind)
|
||||
ImplicitExceptionKind exception_kind)
|
||||
{
|
||||
address target_pc = NULL;
|
||||
|
||||
@ -876,19 +867,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
|
||||
#ifndef PRODUCT
|
||||
_implicit_null_throws++;
|
||||
#endif
|
||||
#if INCLUDE_JVMCI
|
||||
if (cm->is_compiled_by_jvmci() && cm->pc_desc_at(pc) != NULL) {
|
||||
// If there's no PcDesc then we'll die way down inside of
|
||||
// deopt instead of just getting normal error reporting,
|
||||
// so only go there if it will succeed.
|
||||
return deoptimize_for_implicit_exception(thread, pc, cm, Deoptimization::Reason_null_check);
|
||||
} else {
|
||||
#endif // INCLUDE_JVMCI
|
||||
assert (cm->is_nmethod(), "Expect nmethod");
|
||||
target_pc = ((nmethod*)cm)->continuation_for_implicit_exception(pc);
|
||||
#if INCLUDE_JVMCI
|
||||
}
|
||||
#endif // INCLUDE_JVMCI
|
||||
target_pc = cm->continuation_for_implicit_null_exception(pc);
|
||||
// If there's an unexpected fault, target_pc might be NULL,
|
||||
// in which case we want to fall through into the normal
|
||||
// error handling code.
|
||||
@ -904,15 +883,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
|
||||
#ifndef PRODUCT
|
||||
_implicit_div0_throws++;
|
||||
#endif
|
||||
#if INCLUDE_JVMCI
|
||||
if (cm->is_compiled_by_jvmci() && cm->pc_desc_at(pc) != NULL) {
|
||||
return deoptimize_for_implicit_exception(thread, pc, cm, Deoptimization::Reason_div0_check);
|
||||
} else {
|
||||
#endif // INCLUDE_JVMCI
|
||||
target_pc = cm->continuation_for_implicit_exception(pc);
|
||||
#if INCLUDE_JVMCI
|
||||
}
|
||||
#endif // INCLUDE_JVMCI
|
||||
target_pc = cm->continuation_for_implicit_div0_exception(pc);
|
||||
// If there's an unexpected fault, target_pc might be NULL,
|
||||
// in which case we want to fall through into the normal
|
||||
// error handling code.
|
||||
|
||||
@ -204,9 +204,6 @@ class SharedRuntime: AllStatic {
|
||||
static address continuation_for_implicit_exception(JavaThread* thread,
|
||||
address faulting_pc,
|
||||
ImplicitExceptionKind exception_kind);
|
||||
#if INCLUDE_JVMCI
|
||||
static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason);
|
||||
#endif
|
||||
|
||||
// Post-slow-path-allocation, pre-initializing-stores step for
|
||||
// implementing e.g. ReduceInitialCardMarks
|
||||
|
||||
@ -133,16 +133,12 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* r
|
||||
}
|
||||
#endif
|
||||
// Deoptimization must make sure all oops have passed load barriers
|
||||
#if INCLUDE_ZGC
|
||||
if (UseZGC) {
|
||||
val = ZBarrier::load_barrier_on_oop_field_preloaded((oop*)value_addr, val);
|
||||
}
|
||||
#endif
|
||||
#if INCLUDE_SHENANDOAHGC
|
||||
if (UseShenandoahGC) {
|
||||
val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val);
|
||||
}
|
||||
#endif
|
||||
assert(oopDesc::is_oop_or_null(val, false), "bad oop found");
|
||||
Handle h(Thread::current(), val); // Wrap a handle around the oop
|
||||
return new StackValue(h);
|
||||
}
|
||||
|
||||
@ -946,13 +946,20 @@ void CodeCacheDCmd::execute(DCmdSource source, TRAPS) {
CodeHeapAnalyticsDCmd::CodeHeapAnalyticsDCmd(outputStream* output, bool heap) :
DCmdWithParser(output, heap),
_function("function", "Function to be performed (aggregate, UsedSpace, FreeSpace, MethodCount, MethodSpace, MethodAge, MethodNames, discard", "STRING", false, "all"),
_granularity("granularity", "Detail level - smaller value -> more detail", "STRING", false, "4096") {
_granularity("granularity", "Detail level - smaller value -> more detail", "INT", false, "4096") {
_dcmdparser.add_dcmd_argument(&_function);
_dcmdparser.add_dcmd_argument(&_granularity);
}

void CodeHeapAnalyticsDCmd::execute(DCmdSource source, TRAPS) {
CompileBroker::print_heapinfo(output(), _function.value(), _granularity.value());
jlong granularity = _granularity.value();
if (granularity < 1) {
Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
"Invalid granularity value " JLONG_FORMAT ". Should be positive.\n", granularity);
return;
}

CompileBroker::print_heapinfo(output(), _function.value(), granularity);
}

int CodeHeapAnalyticsDCmd::num_arguments() {

@ -645,7 +645,7 @@ public:
class CodeHeapAnalyticsDCmd : public DCmdWithParser {
protected:
DCmdArgument<char*> _function;
DCmdArgument<char*> _granularity;
DCmdArgument<jlong> _granularity;
public:
CodeHeapAnalyticsDCmd(outputStream* output, bool heap);
static const char* name() {
@ -152,6 +152,12 @@ class GenericGrowableArray : public ResourceObj {
template<class E> class GrowableArrayIterator;
template<class E, class UnaryPredicate> class GrowableArrayFilterIterator;

template<class E>
class CompareClosure : public Closure {
public:
virtual int do_compare(const E&, const E&) = 0;
};

template<class E> class GrowableArray : public GenericGrowableArray {
friend class VMStructs;

@ -443,6 +449,37 @@ template<class E> class GrowableArray : public GenericGrowableArray {
}
return min;
}

E insert_sorted(CompareClosure<E>* cc, const E& key) {
bool found;
int location = find_sorted(cc, key, found);
if (!found) {
insert_before(location, key);
}
return at(location);
}

template<typename K>
int find_sorted(CompareClosure<E>* cc, const K& key, bool& found) {
found = false;
int min = 0;
int max = length() - 1;

while (max >= min) {
int mid = (int)(((uint)max + min) / 2);
E value = at(mid);
int diff = cc->do_compare(key, value);
if (diff > 0) {
min = mid + 1;
} else if (diff < 0) {
max = mid - 1;
} else {
found = true;
return mid;
}
}
return min;
}
};

// Global GrowableArray methods (one instance in the library per each 'E' type).
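The CompareClosure/find_sorted/insert_sorted additions above give GrowableArray a binary-search-based sorted insert; when the key is already present, insert_sorted returns the existing element instead of inserting a duplicate. A hedged usage sketch, assuming a HotSpot source context (IntCompare and example() are invented for illustration and are not part of this change):

#include "memory/resourceMark.hpp"
#include "utilities/growableArray.hpp"

// Comparator returning <0, 0, >0, as find_sorted expects.
class IntCompare : public CompareClosure<int> {
public:
  virtual int do_compare(const int& a, const int& b) {
    return (a < b) ? -1 : ((b < a) ? 1 : 0);
  }
};

static void example() {
  ResourceMark rm;
  GrowableArray<int>* list = new GrowableArray<int>(8);
  IntCompare cmp;

  list->insert_sorted(&cmp, 5);
  list->insert_sorted(&cmp, 1);
  list->insert_sorted(&cmp, 5);      // duplicate: existing element is returned

  bool found;
  int idx = list->find_sorted(&cmp, 5, found);
  // found == true; the array stays sorted: [1, 5]
  (void)idx;
}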