8307139: Fix signed integer overflow in compiler code, part 1

Reviewed-by: thartmann, rcastanedalo, kvn
This commit is contained in:
Dean Long 2023-05-11 18:06:43 +00:00
parent 5d6cce0f85
commit 7fcb0fdcd4
22 changed files with 48 additions and 64 deletions

View File

@@ -3235,7 +3235,7 @@ operand immL_32bits()
operand immI_Pow2M1()
%{
predicate((n->get_int() > 0)
&& is_power_of_2(n->get_int() + 1));
&& is_power_of_2((juint)n->get_int() + 1));
match(ConI);
op_cost(20);

View File

@@ -89,9 +89,9 @@ void Canonicalizer::do_Op2(Op2* x) {
{ jint a = x->x()->type()->as_IntConstant()->value();
jint b = x->y()->type()->as_IntConstant()->value();
switch (x->op()) {
case Bytecodes::_iadd: set_constant(a + b); return;
case Bytecodes::_isub: set_constant(a - b); return;
case Bytecodes::_imul: set_constant(a * b); return;
case Bytecodes::_iadd: set_constant(java_add(a, b)); return;
case Bytecodes::_isub: set_constant(java_subtract(a, b)); return;
case Bytecodes::_imul: set_constant(java_multiply(a, b)); return;
case Bytecodes::_idiv:
if (b != 0) {
if (a == min_jint && b == -1) {
@@ -335,9 +335,9 @@ void Canonicalizer::do_NegateOp(NegateOp* x) {
ValueType* t = x->x()->type();
if (t->is_constant()) {
switch (t->tag()) {
case intTag : set_constant(-t->as_IntConstant ()->value()); return;
case longTag : set_constant(-t->as_LongConstant ()->value()); return;
case floatTag : set_constant(-t->as_FloatConstant ()->value()); return;
case intTag : set_constant(java_negate(t->as_IntConstant()->value())); return;
case longTag : set_constant(java_negate(t->as_LongConstant()->value())); return;
case floatTag : set_constant(-t->as_FloatConstant()->value()); return;
case doubleTag: set_constant(-t->as_DoubleConstant()->value()); return;
default : ShouldNotReachHere();
}

View File

@@ -843,8 +843,8 @@ void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** ex
if (expected_type != NULL) {
BasicType t = expected_type->element_type()->basic_type();
int element_size = type2aelembytes(t);
if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
if (((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0)) {
flags &= ~LIR_OpArrayCopy::unaligned;
}
}

View File

@@ -270,10 +270,11 @@ void RangeCheckEliminator::Visitor::do_ArithmeticOp(ArithmeticOp *ao) {
Bound * bound = _rce->get_bound(y);
if (bound->has_upper() && bound->has_lower()) {
int new_lower = bound->lower() + const_value;
// TODO: consider using __builtin_add_overflow
jlong new_lowerl = ((jlong)bound->lower()) + const_value;
int new_upper = bound->upper() + const_value;
jint new_lower = low(new_lowerl);
jlong new_upperl = ((jlong)bound->upper()) + const_value;
jint new_upper = low(new_upperl);
if (((jlong)new_lower) == new_lowerl && ((jlong)new_upper == new_upperl)) {
Bound *newBound = new Bound(new_lower, bound->lower_instr(), new_upper, bound->upper_instr());

View File

@@ -133,7 +133,7 @@ class CompilerToVM {
}
static unsigned cstring_hash(const char* const& s) {
int h = 0;
unsigned h = 0;
const char* p = s;
while (*p != '\0') {
h = 31 * h + *p;

View File

@@ -415,7 +415,7 @@ void VirtualSpaceNode::verify() const {
verify_locked();
}
volatile int test_access = 0;
volatile uint test_access = 0;
// Verify counters and basic structure. Slow mode: verify all chunks in depth
void VirtualSpaceNode::verify_locked() const {
@@ -436,7 +436,7 @@ void VirtualSpaceNode::verify_locked() const {
SOMETIMES(
for (MetaWord* p = base(); p < base() + used_words(); p += os::vm_page_size()) {
if (_commit_mask.is_committed_address(p)) {
test_access += *(int*)p;
test_access += *(uint*)p;
}
}
)

View File

@@ -133,7 +133,7 @@
notproduct(bool, OptoBreakpointOSR, false, \
"insert breakpoint at osr method entry") \
\
notproduct(intx, BreakAtNode, 0, \
notproduct(uint64_t, BreakAtNode, 0, \
"Break at construction of this Node (either _idx or _debug_idx)") \
\
notproduct(bool, OptoBreakpointC2R, false, \

View File

@@ -578,7 +578,6 @@ void Compile::print_ideal_ir(const char* phase_name) {
// ============================================================================
//------------------------------Compile standard-------------------------------
debug_only( int Compile::_debug_idx = 100000; )
// Compile a method. entry_bci is -1 for normal compilations and indicates
// the continuation bci for on stack replacement.

View File

@@ -373,7 +373,6 @@ class Compile : public Phase {
DEBUG_ONLY(Unique_Node_List* _modified_nodes;) // List of nodes which inputs were modified
DEBUG_ONLY(bool _phase_optimize_finished;) // Used for live node verification while creating new nodes
debug_only(static int _debug_idx;) // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
Arena _node_arena; // Arena for new-space Nodes
Arena _old_arena; // Arena for old-space Nodes, lifetime during xform
RootNode* _root; // Unique root of compilation, or null after bail-out.
@@ -780,8 +779,6 @@ class Compile : public Phase {
uint unique() const { return _unique; }
uint next_unique() { return _unique++; }
void set_unique(uint i) { _unique = i; }
static int debug_idx() { return debug_only(_debug_idx)+0; }
static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
Arena* node_arena() { return &_node_arena; }
Arena* old_arena() { return &_old_arena; }
RootNode* root() const { return _root; }

View File

@@ -377,9 +377,6 @@ void IdealGraphPrinter::visit_node(Node *n, bool edges, VectorSet* temp_set) {
const Type *t = node->bottom_type();
print_prop("type", t->msg());
print_prop("idx", node->_idx);
#ifdef ASSERT
print_prop("debug_idx", node->_debug_idx);
#endif
if (C->cfg() != nullptr) {
Block* block = C->cfg()->get_block_for_node(node);

View File

@@ -244,12 +244,12 @@ static const Type* bitshuffle_value(const TypeInteger* src_type, const TypeInteg
int bitcount = population_count(static_cast<julong>(bt == T_INT ? maskcon & 0xFFFFFFFFL : maskcon));
if (opc == Op_CompressBits) {
// Bit compression selects the source bits corresponding to true mask bits
// and lays them out contiguously at desitination bit poistions starting from
// and lays them out contiguously at destination bit positions starting from
// LSB, remaining higher order bits are set to zero.
// Thus, it will always generates a +ve value i.e. sign bit set to 0 if
// Thus, it will always generate a +ve value i.e. sign bit set to 0 if
// any bit of constant mask value is zero.
lo = 0L;
hi = (1L << bitcount) - 1;
hi = (1UL << bitcount) - 1;
} else {
assert(opc == Op_ExpandBits, "");
// Expansion sequentially reads source bits starting from LSB

View File

@@ -861,7 +861,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
// not a loop after all
return false;
}
julong orig_iters = hi->hi_as_long() - lo->lo_as_long();
julong orig_iters = (julong)hi->hi_as_long() - lo->lo_as_long();
iters_limit = checked_cast<int>(MIN2((julong)iters_limit, orig_iters));
// We need a safepoint to insert empty predicates for the inner loop.

View File

@@ -324,8 +324,8 @@ address PhaseMacroExpand::basictype2arraycopy(BasicType t,
int s_offs = src_offset_inttype->get_con();
int d_offs = dest_offset_inttype->get_con();
int element_size = type2aelembytes(t);
aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
aligned = ((arrayOopDesc::base_offset_in_bytes(t) + (uint)s_offs * element_size) % HeapWordSize == 0) &&
((arrayOopDesc::base_offset_in_bytes(t) + (uint)d_offs * element_size) % HeapWordSize == 0);
if (s_offs >= d_offs) disjoint = true;
} else if (src_offset == dest_offset && src_offset != nullptr) {
// This can occur if the offsets are identical non-constants.

View File

@@ -38,7 +38,7 @@ public:
typedef typename TypeClass::NativeType NativeType;
static bool will_overflow(NativeType value1, NativeType value2) {
NativeType result = value1 + value2;
NativeType result = java_add(value1, value2);
// Hacker's Delight 2-12 Overflow if both arguments have the opposite sign of the result
if (((value1 ^ result) & (value2 ^ result)) >= 0) {
return false;
@@ -61,7 +61,7 @@ public:
typedef typename TypeClass::NativeType NativeType;
static bool will_overflow(NativeType value1, NativeType value2) {
NativeType result = value1 - value2;
NativeType result = java_subtract(value1, value2);
// hacker's delight 2-12 overflow iff the arguments have different signs and
// the sign of the result is different than the sign of arg1
if (((value1 ^ value2) & (value1 ^ result)) >= 0) {

View File

@@ -775,7 +775,7 @@ Node *AndLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( t12 && t12->is_con() ) { // Shift is by a constant
int shift = t12->get_con();
shift &= BitsPerJavaLong - 1; // semantics of Java shifts
const jlong sign_bits_mask = ~(((jlong)CONST64(1) << (jlong)(BitsPerJavaLong - shift)) -1);
const julong sign_bits_mask = ~(((julong)CONST64(1) << (julong)(BitsPerJavaLong - shift)) -1);
// If the AND'ing of the 2 masks has no bits, then only original shifted
// bits survive. NO sign-extension bits survive the maskings.
if( (sign_bits_mask & mask) == 0 ) {

View File

@@ -68,30 +68,17 @@ extern int nodes_created;
// Set a breakpoint here to identify where a particular node index is built.
void Node::verify_construction() {
_debug_orig = nullptr;
int old_debug_idx = Compile::debug_idx();
int new_debug_idx = old_debug_idx + 1;
if (new_debug_idx > 0) {
// Arrange that the lowest five decimal digits of _debug_idx
// will repeat those of _idx. In case this is somehow pathological,
// we continue to assign negative numbers (!) consecutively.
const int mod = 100000;
int bump = (int)(_idx - new_debug_idx) % mod;
if (bump < 0) {
bump += mod;
}
assert(bump >= 0 && bump < mod, "");
new_debug_idx += bump;
}
Compile::set_debug_idx(new_debug_idx);
set_debug_idx(new_debug_idx);
// The decimal digits of _debug_idx are <compile_id> followed by 10 digits of <_idx>
Compile* C = Compile::current();
assert(C->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
uint64_t new_debug_idx = (uint64_t)C->compile_id() * 10000000000 + _idx;
set_debug_idx(new_debug_idx);
if (!C->phase_optimize_finished()) {
// Only check assert during parsing and optimization phase. Skip it while generating code.
assert(C->live_nodes() <= C->max_node_limit(), "Live Node limit exceeded limit");
}
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (uint64_t)_idx == BreakAtNode)) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=" UINT64_FORMAT, _idx, _debug_idx);
BREAKPOINT;
}
#if OPTO_DU_ITERATOR_ASSERT
@@ -2493,8 +2480,8 @@ void Node::set_debug_orig(Node* orig) {
if (not_a_node(orig)) orig = nullptr;
int trip = 10;
while (orig != nullptr) {
if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
if (orig->debug_idx() == BreakAtNode || (uintx)orig->_idx == BreakAtNode) {
tty->print_cr("BreakAtNode: _idx=%d _debug_idx=" UINT64_FORMAT " orig._idx=%d orig._debug_idx=" UINT64_FORMAT,
this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
BREAKPOINT;
}
@@ -2526,7 +2513,7 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc)
if (is_disconnected(this)) {
#ifdef ASSERT
st->print(" [%d]",debug_idx());
st->print(" [" UINT64_FORMAT "]", debug_idx());
dump_orig(st);
#endif
st->cr();
@@ -2542,7 +2529,7 @@ void Node::dump(const char* suffix, bool mark, outputStream* st, DumpConfig* dc)
#ifdef ASSERT
// Dump the non-reset _debug_idx
if (Verbose && WizardMode) {
st->print(" [%d]",debug_idx());
st->print(" [" UINT64_FORMAT "]", debug_idx());
}
#endif

View File

@@ -1244,9 +1244,9 @@ public:
void set_debug_orig(Node* orig); // _debug_orig = orig
void dump_orig(outputStream *st, bool print_key = true) const;
int _debug_idx; // Unique value assigned to every node.
int debug_idx() const { return _debug_idx; }
void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }
uint64_t _debug_idx; // Unique value assigned to every node.
uint64_t debug_idx() const { return _debug_idx; }
void set_debug_idx(uint64_t debug_idx) { _debug_idx = debug_idx; }
int _hash_lock; // Barrier to modifications of nodes in the hash table
void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }

View File

@@ -734,7 +734,7 @@ void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchR
// It pays off: emit the test for the most common range
assert(most_freq.cnt() > 0, "must be taken");
Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo())));
Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(java_subtract(most_freq.hi(), most_freq.lo()))));
Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
jump_if_true_fork(iff, most_freq.dest(), false);

View File

@@ -1175,7 +1175,7 @@ Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
if (arg_val > p) {
return __ intcon(i + d);
}
p = 10 * p;
p = java_multiply(10, p);
}
return __ intcon(10 + d);
}

View File

@@ -175,7 +175,7 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){
if( t2->base() == Type::Int ){ // Might be bottom or top...
const TypeInt *i = t2->is_int();
if( i->is_con() )
return new AddINode(in1, phase->intcon(-i->get_con()));
return new AddINode(in1, phase->intcon(java_negate(i->get_con())));
}
// Convert "(x+c0) - y" into (x-y) + c0"
@@ -205,7 +205,7 @@ Node *SubINode::Ideal(PhaseGVN *phase, bool can_reshape){
} else {
// Match x
Node* sub2 = phase->transform(new SubINode(in1, in21));
Node* neg_c0 = phase->intcon(-c0);
Node* neg_c0 = phase->intcon(java_negate(c0));
return new AddINode(sub2, neg_c0);
}
}

View File

@@ -1687,7 +1687,7 @@ const Type *TypeInt::widen( const Type *old, const Type* limit ) const {
// If neither endpoint is extremal yet, push out the endpoint
// which is closer to its respective limit.
if (_lo >= 0 || // easy common case
(juint)(_lo - min) >= (juint)(max - _hi)) {
((juint)_lo - min) >= ((juint)max - _hi)) {
// Try to widen to an unsigned range type of 31 bits:
return make(_lo, max, WidenMax);
} else {
@@ -1997,8 +1997,8 @@ const Type *TypeLong::narrow( const Type *old ) const {
// The new type narrows the old type, so look for a "death march".
// See comments on PhaseTransform::saturate.
julong nrange = _hi - _lo;
julong orange = ohi - olo;
julong nrange = (julong)_hi - _lo;
julong orange = (julong)ohi - olo;
if (nrange < max_julong - 1 && nrange > (orange >> 1) + (SMALLINT*2)) {
// Use the new type only if the range shrinks a lot.
// We do not want the optimizer computing 2^31 point by point.

View File

@@ -1225,6 +1225,9 @@ JAVA_INTEGER_OP(+, java_add, jlong, julong)
JAVA_INTEGER_OP(-, java_subtract, jlong, julong)
JAVA_INTEGER_OP(*, java_multiply, jlong, julong)
inline jint java_negate(jint v) { return java_subtract((jint) 0, v); }
inline jlong java_negate(jlong v) { return java_subtract((jlong)0, v); }
#undef JAVA_INTEGER_OP
// Provide integer shift operations with Java semantics. No overflow
@@ -1314,7 +1317,7 @@ inline int64_t multiply_high_signed(const int64_t x, const int64_t y) {
const jlong y1 = java_shift_right((jlong)y, 32);
const jlong y2 = y & 0xFFFFFFFF;
const uint64_t z2 = x2 * y2;
const uint64_t z2 = (uint64_t)x2 * y2;
const int64_t t = x1 * y2 + (z2 >> 32u); // Unsigned shift
int64_t z1 = t & 0xFFFFFFFF;
const int64_t z0 = java_shift_right((jlong)t, 32);