Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-28 03:58:21 +00:00

8367341: C2: apply KnownBits and unsigned bounds to And / Or operations
Reviewed-by: hgreule, epeter

parent 00050f84d4
commit e67805067a
@@ -31,8 +31,8 @@
#include "opto/movenode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rangeinference.hpp"
#include "opto/subnode.hpp"
#include "opto/utilities/xor.hpp"
#include "runtime/stubRoutines.hpp"

// Portions of code courtesy of Clifford Click
@@ -1011,35 +1011,8 @@ Node* OrINode::Ideal(PhaseGVN* phase, bool can_reshape) {
// the logical operations the ring's ADD is really a logical OR function.
// This also type-checks the inputs for sanity. Guaranteed never to
// be passed a TOP or BOTTOM type, these are filtered out by pre-check.
const Type *OrINode::add_ring( const Type *t0, const Type *t1 ) const {
  const TypeInt *r0 = t0->is_int(); // Handy access
  const TypeInt *r1 = t1->is_int();

  // If both args are bool, can figure out better types
  if ( r0 == TypeInt::BOOL ) {
    if ( r1 == TypeInt::ONE) {
      return TypeInt::ONE;
    } else if ( r1 == TypeInt::BOOL ) {
      return TypeInt::BOOL;
    }
  } else if ( r0 == TypeInt::ONE ) {
    if ( r1 == TypeInt::BOOL ) {
      return TypeInt::ONE;
    }
  }

  // If either input is all ones, the output is all ones.
  // x | ~0 == ~0 <==> x | -1 == -1
  if (r0 == TypeInt::MINUS_1 || r1 == TypeInt::MINUS_1) {
    return TypeInt::MINUS_1;
  }

  // If either input is not a constant, just return all integers.
  if( !r0->is_con() || !r1->is_con() )
    return TypeInt::INT; // Any integer, but still no symbols.

  // Otherwise just OR them bits.
  return TypeInt::make( r0->get_con() | r1->get_con() );
const Type* OrINode::add_ring(const Type* t1, const Type* t2) const {
  return RangeInference::infer_or(t1->is_int(), t2->is_int());
}

//=============================================================================
@@ -1087,22 +1060,8 @@ Node* OrLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
}

//------------------------------add_ring---------------------------------------
const Type *OrLNode::add_ring( const Type *t0, const Type *t1 ) const {
  const TypeLong *r0 = t0->is_long(); // Handy access
  const TypeLong *r1 = t1->is_long();

  // If either input is all ones, the output is all ones.
  // x | ~0 == ~0 <==> x | -1 == -1
  if (r0 == TypeLong::MINUS_1 || r1 == TypeLong::MINUS_1) {
    return TypeLong::MINUS_1;
  }

  // If either input is not a constant, just return all integers.
  if( !r0->is_con() || !r1->is_con() )
    return TypeLong::LONG; // Any integer, but still no symbols.

  // Otherwise just OR them bits.
  return TypeLong::make( r0->get_con() | r1->get_con() );
const Type* OrLNode::add_ring(const Type* t1, const Type* t2) const {
  return RangeInference::infer_or(t1->is_long(), t2->is_long());
}

//---------------------------Helper -------------------------------------------
@@ -1189,46 +1148,14 @@ const Type* XorINode::Value(PhaseGVN* phase) const {
// the logical operations the ring's ADD is really a logical OR function.
// This also type-checks the inputs for sanity. Guaranteed never to
// be passed a TOP or BOTTOM type, these are filtered out by pre-check.
const Type *XorINode::add_ring( const Type *t0, const Type *t1 ) const {
  const TypeInt *r0 = t0->is_int(); // Handy access
  const TypeInt *r1 = t1->is_int();

  if (r0->is_con() && r1->is_con()) {
    // compute constant result
    return TypeInt::make(r0->get_con() ^ r1->get_con());
  }

  // At least one of the arguments is not constant

  if (r0->_lo >= 0 && r1->_lo >= 0) {
    // Combine [r0->_lo, r0->_hi] ^ [r0->_lo, r1->_hi] -> [0, upper_bound]
    jint upper_bound = xor_upper_bound_for_ranges<jint, juint>(r0->_hi, r1->_hi);
    return TypeInt::make(0, upper_bound, MAX2(r0->_widen, r1->_widen));
  }

  return TypeInt::INT;
const Type* XorINode::add_ring(const Type* t1, const Type* t2) const {
  return RangeInference::infer_xor(t1->is_int(), t2->is_int());
}

//=============================================================================
//------------------------------add_ring---------------------------------------
const Type *XorLNode::add_ring( const Type *t0, const Type *t1 ) const {
  const TypeLong *r0 = t0->is_long(); // Handy access
  const TypeLong *r1 = t1->is_long();

  if (r0->is_con() && r1->is_con()) {
    // compute constant result
    return TypeLong::make(r0->get_con() ^ r1->get_con());
  }

  // At least one of the arguments is not constant

  if (r0->_lo >= 0 && r1->_lo >= 0) {
    // Combine [r0->_lo, r0->_hi] ^ [r0->_lo, r1->_hi] -> [0, upper_bound]
    julong upper_bound = xor_upper_bound_for_ranges<jlong, julong>(r0->_hi, r1->_hi);
    return TypeLong::make(0, upper_bound, MAX2(r0->_widen, r1->_widen));
  }

  return TypeLong::LONG;
const Type* XorLNode::add_ring(const Type* t1, const Type* t2) const {
  return RangeInference::infer_xor(t1->is_long(), t2->is_long());
}

Node* XorLNode::Ideal(PhaseGVN* phase, bool can_reshape) {

@@ -29,6 +29,7 @@
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rangeinference.hpp"
#include "opto/subnode.hpp"
#include "utilities/powerOfTwo.hpp"

@@ -620,80 +621,14 @@ const Type* MulHiValue(const Type *t1, const Type *t2, const Type *bot) {
  return TypeLong::LONG;
}

template<typename IntegerType>
static const IntegerType* and_value(const IntegerType* r0, const IntegerType* r1) {
  typedef typename IntegerType::NativeType NativeType;
  static_assert(std::is_signed<NativeType>::value, "Native type of IntegerType must be signed!");

  int widen = MAX2(r0->_widen, r1->_widen);

  // If both types are constants, we can calculate a constant result.
  if (r0->is_con() && r1->is_con()) {
    return IntegerType::make(r0->get_con() & r1->get_con());
  }

  // If both ranges are positive, the result will range from 0 up to the hi value of the smaller range. The minimum
  // of the two constrains the upper bound because any higher value in the other range will see all zeroes, so it will be masked out.
  if (r0->_lo >= 0 && r1->_lo >= 0) {
    return IntegerType::make(0, MIN2(r0->_hi, r1->_hi), widen);
  }

  // If only one range is positive, the result will range from 0 up to that range's maximum value.
  // For the operation 'x & C' where C is a positive constant, the result will be in the range [0..C]. With that observation,
  // we can say that for any integer c such that 0 <= c <= C will also be in the range [0..C]. Therefore, 'x & [c..C]'
  // where c >= 0 will be in the range [0..C].
  if (r0->_lo >= 0) {
    return IntegerType::make(0, r0->_hi, widen);
  }

  if (r1->_lo >= 0) {
    return IntegerType::make(0, r1->_hi, widen);
  }

  // At this point, all positive ranges will have already been handled, so the only remaining cases will be negative ranges
  // and constants.

  assert(r0->_lo < 0 && r1->_lo < 0, "positive ranges should already be handled!");

  // As two's complement means that both numbers will start with leading 1s, the lower bound of both ranges will contain
  // the common leading 1s of both minimum values. In order to count them with count_leading_zeros, the bits are inverted.
  NativeType sel_val = ~MIN2(r0->_lo, r1->_lo);

  NativeType min;
  if (sel_val == 0) {
    // Since count_leading_zeros is undefined at 0, we short-circuit the condition where both ranges have a minimum of -1.
    min = -1;
  } else {
    // To get the number of bits to shift, we count the leading 0-bits and then subtract one, as the sign bit is already set.
    int shift_bits = count_leading_zeros(sel_val) - 1;
    min = std::numeric_limits<NativeType>::min() >> shift_bits;
  }

  NativeType max;
  if (r0->_hi < 0 && r1->_hi < 0) {
    // If both ranges are negative, then the same optimization as both positive ranges will apply, and the smaller hi
    // value will mask off any bits set by higher values.
    max = MIN2(r0->_hi, r1->_hi);
  } else {
    // In the case of ranges that cross zero, negative values can cause the higher order bits to be set, so the maximum
    // positive value can be as high as the larger hi value.
    max = MAX2(r0->_hi, r1->_hi);
  }

  return IntegerType::make(min, max, widen);
}

//=============================================================================
//------------------------------mul_ring---------------------------------------
// Supplied function returns the product of the inputs IN THE CURRENT RING.
// For the logical operations the ring's MUL is really a logical AND function.
// This also type-checks the inputs for sanity. Guaranteed never to
// be passed a TOP or BOTTOM type, these are filtered out by pre-check.
const Type *AndINode::mul_ring( const Type *t0, const Type *t1 ) const {
  const TypeInt* r0 = t0->is_int();
  const TypeInt* r1 = t1->is_int();

  return and_value<TypeInt>(r0, r1);
const Type* AndINode::mul_ring(const Type* t1, const Type* t2) const {
  return RangeInference::infer_and(t1->is_int(), t2->is_int());
}

static bool AndIL_is_zero_element_under_mask(const PhaseGVN* phase, const Node* expr, const Node* mask, BasicType bt);
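Editor's note: the lower-bound computation for two negative ranges is the subtlest part of the removed and_value above. The following is an illustrative standalone sketch, not part of the patch; plain <cstdint> types and std::countl_zero stand in for HotSpot's NativeType and count_leading_zeros, and the concrete ranges are made up for the example.

#include <algorithm>
#include <bit>        // std::countl_zero (C++20)
#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  // Two ranges with negative lower bounds, e.g. [-10, 5] and [-20, -3].
  int32_t lo0 = -10, lo1 = -20;
  // Invert the smaller low so the shared leading 1-bits become leading 0-bits.
  int32_t sel_val = ~std::min(lo0, lo1);                       // ~(-20) == 19 == 0b10011
  int shift_bits = std::countl_zero(uint32_t(sel_val)) - 1;    // 27 - 1 == 26
  int32_t min = std::numeric_limits<int32_t>::min() >> shift_bits;  // == -32
  // Any v0 >= -10 and v1 >= -20 then satisfy (v0 & v1) >= -32, for example:
  assert((-10 & -20) >= min);  // -10 & -20 == -28
  assert(( -1 & -20) >= min);  // == -20
  return 0;
}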
@@ -822,11 +757,8 @@ Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// For the logical operations the ring's MUL is really a logical AND function.
// This also type-checks the inputs for sanity. Guaranteed never to
// be passed a TOP or BOTTOM type, these are filtered out by pre-check.
const Type *AndLNode::mul_ring( const Type *t0, const Type *t1 ) const {
  const TypeLong* r0 = t0->is_long();
  const TypeLong* r1 = t1->is_long();

  return and_value<TypeLong>(r0, r1);
const Type* AndLNode::mul_ring(const Type* t1, const Type* t2) const {
  return RangeInference::infer_and(t1->is_long(), t2->is_long());
}

const Type* AndLNode::Value(PhaseGVN* phase) const {

@@ -25,7 +25,6 @@
#include "opto/rangeinference.hpp"
#include "opto/type.hpp"
#include "utilities/intn_t.hpp"
#include "utilities/tuple.hpp"

// If the cardinality of a TypeInt is below this threshold, use min widen, see
// TypeIntPrototype<S, U>::normalize_widen
@@ -688,6 +687,8 @@ template class TypeIntPrototype<intn_t<1>, uintn_t<1>>;
template class TypeIntPrototype<intn_t<2>, uintn_t<2>>;
template class TypeIntPrototype<intn_t<3>, uintn_t<3>>;
template class TypeIntPrototype<intn_t<4>, uintn_t<4>>;
template class TypeIntPrototype<intn_t<5>, uintn_t<5>>;
template class TypeIntPrototype<intn_t<6>, uintn_t<6>>;

// Compute the meet of 2 types. When dual is true, the subset relation in CT is
// reversed. This means that the result of 2 CTs would be the intersection of
@@ -709,10 +710,7 @@ const Type* TypeIntHelper::int_type_xmeet(const CT* i1, const Type* t2) {

  if (!i1->_is_dual) {
    // meet (a.k.a union)
    return CT::make_or_top(TypeIntPrototype<S, U>{{MIN2(i1->_lo, i2->_lo), MAX2(i1->_hi, i2->_hi)},
                                                  {MIN2(i1->_ulo, i2->_ulo), MAX2(i1->_uhi, i2->_uhi)},
                                                  {i1->_bits._zeros & i2->_bits._zeros, i1->_bits._ones & i2->_bits._ones}},
                           MAX2(i1->_widen, i2->_widen), false);
    return int_type_union(i1, i2);
  } else {
    // join (a.k.a intersection)
    return CT::make_or_top(TypeIntPrototype<S, U>{{MAX2(i1->_lo, i2->_lo), MIN2(i1->_hi, i2->_hi)},

@@ -25,6 +25,7 @@
#ifndef SHARE_OPTO_RANGEINFERENCE_HPP
#define SHARE_OPTO_RANGEINFERENCE_HPP

#include "cppstdlib/limits.hpp"
#include "cppstdlib/type_traits.hpp"
#include "utilities/globalDefinitions.hpp"

@@ -92,19 +93,6 @@ public:
  RangeInt<U> _urange;
  KnownBits<U> _bits;

private:
  friend class TypeInt;
  friend class TypeLong;

  template <class T1, class T2>
  friend void test_canonicalize_constraints_exhaustive();

  template <class T1, class T2>
  friend void test_canonicalize_constraints_simple();

  template <class T1, class T2>
  friend void test_canonicalize_constraints_random();

  // A canonicalized version of a TypeIntPrototype, if the prototype represents
  // an empty type, _present is false, otherwise, _data is canonical
  class CanonicalizedTypeIntPrototype {
@@ -158,21 +146,33 @@ public:
  template <class CT>
  static const Type* int_type_xmeet(const CT* i1, const Type* t2);

  template <class CT>
  static bool int_type_is_equal(const CT* t1, const CT* t2) {
  template <class CTP>
  static CTP int_type_union(CTP t1, CTP t2) {
    using CT = std::conditional_t<std::is_pointer_v<CTP>, std::remove_pointer_t<CTP>, CTP>;
    using S = std::remove_const_t<decltype(CT::_lo)>;
    using U = std::remove_const_t<decltype(CT::_ulo)>;
    return CT::make(TypeIntPrototype<S, U>{{MIN2(t1->_lo, t2->_lo), MAX2(t1->_hi, t2->_hi)},
                                           {MIN2(t1->_ulo, t2->_ulo), MAX2(t1->_uhi, t2->_uhi)},
                                           {t1->_bits._zeros & t2->_bits._zeros, t1->_bits._ones & t2->_bits._ones}},
                    MAX2(t1->_widen, t2->_widen));
  }

  template <class CTP>
  static bool int_type_is_equal(const CTP t1, const CTP t2) {
    return t1->_lo == t2->_lo && t1->_hi == t2->_hi &&
           t1->_ulo == t2->_ulo && t1->_uhi == t2->_uhi &&
           t1->_bits._zeros == t2->_bits._zeros && t1->_bits._ones == t2->_bits._ones;
  }

  template <class CT>
  static bool int_type_is_subset(const CT* super, const CT* sub) {
  template <class CTP>
  static bool int_type_is_subset(const CTP super, const CTP sub) {
    using U = decltype(super->_ulo);
    return super->_lo <= sub->_lo && super->_hi >= sub->_hi &&
           super->_ulo <= sub->_ulo && super->_uhi >= sub->_uhi &&
           // All bits that are known in super must also be known to be the same
           // value in sub, &~ (and not) is the same as a set subtraction on bit
           // sets
           (super->_bits._zeros &~ sub->_bits._zeros) == 0 && (super->_bits._ones &~ sub->_bits._ones) == 0;
           (super->_bits._zeros &~ sub->_bits._zeros) == U(0) && (super->_bits._ones &~ sub->_bits._ones) == U(0);
  }

  template <class CT>
@@ -195,4 +195,199 @@ public:
#endif // PRODUCT
};

// A TypeIntMirror is structurally similar to a TypeInt or a TypeLong but it decouples the range
// inference from the Type infrastructure of the compiler. It also allows more flexibility with the
// bit width of the integer type. As a result, it is more efficient to use for intermediate steps
// of inference, as well as more flexible to perform testing on different integer types.
template <class S, class U>
class TypeIntMirror {
public:
  S _lo;
  S _hi;
  U _ulo;
  U _uhi;
  KnownBits<U> _bits;
  int _widen = 0; // dummy field to mimic the same field in TypeInt, useful in testing

  static TypeIntMirror make(const TypeIntPrototype<S, U>& t, int widen) {
    auto canonicalized_t = t.canonicalize_constraints();
    assert(!canonicalized_t.empty(), "must not be empty");
    return TypeIntMirror{canonicalized_t._data._srange._lo, canonicalized_t._data._srange._hi,
                         canonicalized_t._data._urange._lo, canonicalized_t._data._urange._hi,
                         canonicalized_t._data._bits};
  }

  // These allow TypeIntMirror to mimic the behaviors of TypeInt* and TypeLong*, so they can be
  // passed into RangeInference methods. These are only used in testing, so they are implemented in
  // the test file.
  const TypeIntMirror* operator->() const;
  TypeIntMirror meet(const TypeIntMirror& o) const;
  bool contains(U u) const;
  bool contains(const TypeIntMirror& o) const;
  bool operator==(const TypeIntMirror& o) const;

  template <class T>
  TypeIntMirror cast() const;
};

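Editor's note: the operator-> above is what lets the same templates accept both pointers (TypeInt*, TypeLong*) and TypeIntMirror values. A minimal standalone sketch of that trick (illustrative only, the Mirror/lo_of names are made up for the example):

#include <cassert>

struct Mirror {
  int _lo;
  const Mirror* operator->() const { return this; }  // makes t->_lo work on a value
};

template <class CTP>
int lo_of(CTP t) { return t->_lo; }  // instantiates for both Mirror and Mirror*

int main() {
  Mirror m{42};
  assert(lo_of(m) == 42);    // value: user-defined operator-> drills down to this
  assert(lo_of(&m) == 42);   // pointer: the built-in -> is used
  return 0;
}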
// This class contains methods for inferring the Type of the result of several arithmetic
// operations from those of the corresponding inputs. For example, given a, b such that the Type of
// a is [0, 1] and the Type of b is [-1, 3], then the Type of the sum a + b is [-1, 4].
// The methods in this class receive one or more template parameters which are often TypeInt* or
// TypeLong*, or they can be TypeIntMirror which behave similar to TypeInt* and TypeLong* during
// testing. This allows us to verify the correctness of the implementation without coupling with
// the hotspot compiler allocation infrastructure.
class RangeInference {
private:
  // If CTP is a pointer, get the underlying type. For the test helper classes, using the struct
  // directly allows straightforward equality comparison.
  template <class CTP>
  using CT = std::remove_const_t<std::conditional_t<std::is_pointer_v<CTP>, std::remove_pointer_t<CTP>, CTP>>;

  // The type of CT::_lo, should be jint for TypeInt* and jlong for TypeLong*
  template <class CTP>
  using S = std::remove_const_t<decltype(CT<CTP>::_lo)>;

  // The type of CT::_ulo, should be juint for TypeInt* and julong for TypeLong*
  template <class CTP>
  using U = std::remove_const_t<decltype(CT<CTP>::_ulo)>;

  // A TypeInt consists of 1 or 2 simple intervals, each of which will lie either in the interval
  // [0, max_signed] or [min_signed, -1]. It is more optimal to analyze each simple interval
  // separately when doing inference. For example, consider a, b whose Types are both [-2, 2]. By
  // analyzing the intervals [-2, -1] and [0, 2] separately, we can easily see that the result of
  // a & b must also be in the interval [-2, 2]. This is much harder if we want to work with the
  // whole value range at the same time.
  // This class offers a convenient way to traverse all the simple intervals of a TypeInt.
  template <class CTP>
  class SimpleIntervalIterable {
  private:
    TypeIntMirror<S<CTP>, U<CTP>> _first_interval;
    TypeIntMirror<S<CTP>, U<CTP>> _second_interval;
    int _interval_num;

  public:
    SimpleIntervalIterable(CTP t) {
      if (U<CTP>(t->_lo) <= U<CTP>(t->_hi)) {
        _interval_num = 1;
        _first_interval = TypeIntMirror<S<CTP>, U<CTP>>{t->_lo, t->_hi, t->_ulo, t->_uhi, t->_bits};
      } else {
        _interval_num = 2;
        _first_interval = TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{t->_lo, S<CTP>(t->_uhi)}, {U<CTP>(t->_lo), t->_uhi}, t->_bits}, 0);
        _second_interval = TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{S<CTP>(t->_ulo), t->_hi}, {t->_ulo, U<CTP>(t->_hi)}, t->_bits}, 0);
      }
    }

    class Iterator {
    private:
      const SimpleIntervalIterable& _iterable;
      int _current_interval;

      Iterator(const SimpleIntervalIterable& iterable) : _iterable(iterable), _current_interval(0) {}

      friend class SimpleIntervalIterable;
    public:
      const TypeIntMirror<S<CTP>, U<CTP>>& operator*() const {
        assert(_current_interval < _iterable._interval_num, "out of bounds, %d - %d", _current_interval, _iterable._interval_num);
        if (_current_interval == 0) {
          return _iterable._first_interval;
        } else {
          return _iterable._second_interval;
        }
      }

      Iterator& operator++() {
        assert(_current_interval < _iterable._interval_num, "out of bounds, %d - %d", _current_interval, _iterable._interval_num);
        _current_interval++;
        return *this;
      }

      bool operator!=(const Iterator& o) const {
        assert(&_iterable == &o._iterable, "not on the same iterable");
        return _current_interval != o._current_interval;
      }
    };

    Iterator begin() const {
      return Iterator(*this);
    }

    Iterator end() const {
      Iterator res(*this);
      res._current_interval = _interval_num;
      return res;
    }
  };

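Editor's note: a standalone brute-force check (not patch code) of the claim in the comment above: for a, b in [-2, 2], a & b stays in [-2, 2]. Splitting into the simple intervals [-2, -1] and [0, 2] makes this easy to see (two negatives keep their common leading ones, so the result is at least -2; anything AND-ed with a non-negative lands in [0, 2]).

#include <cassert>

int main() {
  for (int a = -2; a <= 2; a++) {
    for (int b = -2; b <= 2; b++) {
      int r = a & b;
      assert(r >= -2 && r <= 2);
    }
  }
  return 0;
}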
  // Infer a result given the input types of a binary operation
  template <class CTP, class Inference>
  static CTP infer_binary(CTP t1, CTP t2, Inference infer) {
    CTP res;
    bool is_init = false;

    SimpleIntervalIterable<CTP> t1_simple_intervals(t1);
    SimpleIntervalIterable<CTP> t2_simple_intervals(t2);

    for (auto& st1 : t1_simple_intervals) {
      for (auto& st2 : t2_simple_intervals) {
        CTP current = infer(st1, st2);

        if (is_init) {
          res = res->meet(current)->template cast<CT<CTP>>();
        } else {
          is_init = true;
          res = current;
        }
      }
    }

    assert(is_init, "must be initialized");
    return res;
  }

public:
  template <class CTP>
  static CTP infer_and(CTP t1, CTP t2) {
    return infer_binary(t1, t2, [&](const TypeIntMirror<S<CTP>, U<CTP>>& st1, const TypeIntMirror<S<CTP>, U<CTP>>& st2) {
      S<CTP> lo = std::numeric_limits<S<CTP>>::min();
      S<CTP> hi = std::numeric_limits<S<CTP>>::max();
      U<CTP> ulo = std::numeric_limits<U<CTP>>::min();
      // The unsigned value of the result of 'and' is always not greater than both of its inputs
      // since there is no position at which the bit is 1 in the result and 0 in either input
      U<CTP> uhi = MIN2(st1._uhi, st2._uhi);
      U<CTP> zeros = st1._bits._zeros | st2._bits._zeros;
      U<CTP> ones = st1._bits._ones & st2._bits._ones;
      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
    });
  }

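Editor's note: an illustrative standalone sketch (not patch code) of the rules used in infer_and, on plain 32-bit values: the unsigned result can never exceed either input, a bit is known zero if it is known zero in either input, and known one only if it is known one in both. The concrete masks and values are made up for the example.

#include <cassert>
#include <cstdint>

int main() {
  // Input 1: unsigned at most 6, bit 0 known zero. Input 2: unsigned at most 5, bit 2 known one.
  uint32_t uhi1 = 6, zeros1 = 0b001, ones1 = 0b000;
  uint32_t uhi2 = 5, zeros2 = 0b000, ones2 = 0b100;
  uint32_t uhi   = uhi1 < uhi2 ? uhi1 : uhi2;  // MIN2 -> 5
  uint32_t zeros = zeros1 | zeros2;            // 0b001
  uint32_t ones  = ones1 & ones2;              // 0b000
  // A concrete pair satisfying the input constraints:
  uint32_t v1 = 6 /* 0b110 */, v2 = 5 /* 0b101 */;
  uint32_t r = v1 & v2;                        // 0b100 == 4
  assert(r <= uhi && (r & zeros) == 0 && (r & ones) == ones);
  return 0;
}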
  template <class CTP>
  static CTP infer_or(CTP t1, CTP t2) {
    return infer_binary(t1, t2, [&](const TypeIntMirror<S<CTP>, U<CTP>>& st1, const TypeIntMirror<S<CTP>, U<CTP>>& st2) {
      S<CTP> lo = std::numeric_limits<S<CTP>>::min();
      S<CTP> hi = std::numeric_limits<S<CTP>>::max();
      // The unsigned value of the result of 'or' is always not less than both of its inputs since
      // there is no position at which the bit is 0 in the result and 1 in either input
      U<CTP> ulo = MAX2(st1._ulo, st2._ulo);
      U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
      U<CTP> zeros = st1._bits._zeros & st2._bits._zeros;
      U<CTP> ones = st1._bits._ones | st2._bits._ones;
      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
    });
  }

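Editor's note: the dual observation for infer_or, as a tiny standalone check (not patch code): or-ing can only turn bits on, so the result is unsigned-wise at least as large as each input, hence ulo = MAX2 of the input lower bounds. Values are made up for the example.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t ulo1 = 3, ulo2 = 8;               // unsigned lower bounds of the inputs
  uint32_t ulo = ulo1 > ulo2 ? ulo1 : ulo2;  // MAX2 -> 8
  uint32_t v1 = 3, v2 = 9;                   // concrete values respecting the bounds
  assert((v1 | v2) >= ulo);                  // 3 | 9 == 11 >= 8
  return 0;
}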
  template <class CTP>
  static CTP infer_xor(CTP t1, CTP t2) {
    return infer_binary(t1, t2, [&](const TypeIntMirror<S<CTP>, U<CTP>>& st1, const TypeIntMirror<S<CTP>, U<CTP>>& st2) {
      S<CTP> lo = std::numeric_limits<S<CTP>>::min();
      S<CTP> hi = std::numeric_limits<S<CTP>>::max();
      U<CTP> ulo = std::numeric_limits<U<CTP>>::min();
      U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
      U<CTP> zeros = (st1._bits._zeros & st2._bits._zeros) | (st1._bits._ones & st2._bits._ones);
      U<CTP> ones = (st1._bits._zeros & st2._bits._ones) | (st1._bits._ones & st2._bits._zeros);
      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
    });
  }
};

#endif // SHARE_OPTO_RANGEINFERENCE_HPP

@@ -798,6 +798,7 @@ public:
  // must always specify w
  static const TypeInt* make(jint lo, jint hi, int widen);
  static const Type* make_or_top(const TypeIntPrototype<jint, juint>& t, int widen);
  static const TypeInt* make(const TypeIntPrototype<jint, juint>& t, int widen) { return make_or_top(t, widen)->is_int(); }

  // Check for single integer
  bool is_con() const { return _lo == _hi; }
@@ -879,6 +880,7 @@ public:
  // must always specify w
  static const TypeLong* make(jlong lo, jlong hi, int widen);
  static const Type* make_or_top(const TypeIntPrototype<jlong, julong>& t, int widen);
  static const TypeLong* make(const TypeIntPrototype<jlong, julong>& t, int widen) { return make_or_top(t, widen)->is_long(); }

  // Check for single integer
  bool is_con() const { return _lo == _hi; }

@@ -1,47 +0,0 @@
#ifndef SHARE_OPTO_UTILITIES_XOR_HPP
#define SHARE_OPTO_UTILITIES_XOR_HPP

#include "utilities/powerOfTwo.hpp"
// Code separated into its own header to allow access from GTEST

// Given 2 non-negative values in the ranges [0, hi_0] and [0, hi_1], respectively. The bitwise
// xor of these values should also be non-negative. This method calculates an upper bound.

// S and U type parameters correspond to the signed and unsigned
// variants of an integer to operate on.
template<class S, class U>
static S xor_upper_bound_for_ranges(const S hi_0, const S hi_1) {
  static_assert(S(-1) < S(0), "S must be signed");
  static_assert(U(-1) > U(0), "U must be unsigned");

  assert(hi_0 >= 0, "must be non-negative");
  assert(hi_1 >= 0, "must be non-negative");

  // x ^ y cannot have any bit set that is higher than both the highest bits set in x and y
  // x cannot have any bit set that is higher than the highest bit set in hi_0
  // y cannot have any bit set that is higher than the highest bit set in hi_1

  // We want to find a value that has all 1 bits everywhere up to and including
  // the highest bits set in hi_0 as well as hi_1. For this, we can take the next
  // power of 2 strictly greater than both hi values and subtract 1 from it.

  // Example 1:
  // hi_0 = 5 (0b0101) hi_1=1 (0b0001)
  // (5|1)+1 = 0b0110
  // round_up_pow2 = 0b1000
  // -1 = 0b0111 = max

  // Example 2 - this demonstrates need for the +1:
  // hi_0 = 4 (0b0100) hi_1=4 (0b0100)
  // (4|4)+1 = 0b0101
  // round_up_pow2 = 0b1000
  // -1 = 0b0111 = max
  // Without the +1, round_up_pow2 would be 0b0100, resulting in 0b0011 as max

  // Note: cast to unsigned happens before +1 to avoid signed overflow, and
  // round_up is safe because high bit is unset (0 <= lo <= hi)

  return round_up_power_of_2(U(hi_0 | hi_1) + 1) - 1;
}

#endif // SHARE_OPTO_UTILITIES_XOR_HPP
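Editor's note: a standalone check (not patch code) of the bound computed by the deleted helper above, with std::bit_ceil standing in for HotSpot's round_up_power_of_2; it reproduces the two worked examples from the comments and brute-forces the first one.

#include <bit>        // std::bit_ceil (C++20)
#include <cassert>
#include <cstdint>

static int32_t xor_upper_bound(int32_t hi_0, int32_t hi_1) {
  return int32_t(std::bit_ceil(uint32_t(hi_0 | hi_1) + 1) - 1);
}

int main() {
  assert(xor_upper_bound(5, 1) == 7);   // Example 1 from the comments
  assert(xor_upper_bound(4, 4) == 7);   // Example 2 from the comments
  // Brute force: every pair below the bounds xors to at most the computed max.
  for (int32_t x = 0; x <= 5; x++) {
    for (int32_t y = 0; y <= 1; y++) {
      assert((x ^ y) <= xor_upper_bound(5, 1));
    }
  }
  return 0;
}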
@@ -79,6 +79,7 @@ public:
  static_assert(min < max, "");

  constexpr bool operator==(intn_t o) const { return (_v & _mask) == (o._v & _mask); }
  constexpr bool operator!=(intn_t o) const { return !(*this == o); }
  constexpr bool operator<(intn_t o) const { return int(*this) < int(o); }
  constexpr bool operator>(intn_t o) const { return int(*this) > int(o); }
  constexpr bool operator<=(intn_t o) const { return int(*this) <= int(o); }

@@ -25,15 +25,16 @@
#include "opto/rangeinference.hpp"
#include "opto/type.hpp"
#include "runtime/os.hpp"
#include "utilities/intn_t.hpp"
#include "unittest.hpp"
#include "utilities/intn_t.hpp"
#include "utilities/rbTree.hpp"
#include <array>
#include <limits>
#include <type_traits>

template <class U>
static U uniform_random();

template <>
juint uniform_random<juint>() {
  return os::random();
static U uniform_random() {
  return U(juint(os::random()));
}

template <>
@@ -201,7 +202,7 @@ static void test_canonicalize_constraints_random() {
  }
}

TEST_VM(opto, canonicalize_constraints) {
TEST(opto, canonicalize_constraints) {
  test_canonicalize_constraints_trivial();
  test_canonicalize_constraints_exhaustive<intn_t<1>, uintn_t<1>>();
  test_canonicalize_constraints_exhaustive<intn_t<2>, uintn_t<2>>();
@@ -212,3 +213,413 @@ TEST_VM(opto, canonicalize_constraints) {
  test_canonicalize_constraints_random<jint, juint>();
  test_canonicalize_constraints_random<jlong, julong>();
}

// Implementations of TypeIntMirror methods for testing purposes
template <class S, class U>
const TypeIntMirror<S, U>* TypeIntMirror<S, U>::operator->() const {
  return this;
}

template <class S, class U>
TypeIntMirror<S, U> TypeIntMirror<S, U>::meet(const TypeIntMirror& o) const {
  return TypeIntHelper::int_type_union(*this, o);
}

template <class S, class U>
bool TypeIntMirror<S, U>::contains(U u) const {
  S s = S(u);
  return s >= _lo && s <= _hi && u >= _ulo && u <= _uhi && _bits.is_satisfied_by(u);
}

template <class S, class U>
bool TypeIntMirror<S, U>::contains(const TypeIntMirror& o) const {
  return TypeIntHelper::int_type_is_subset(*this, o);
}

template <class S, class U>
bool TypeIntMirror<S, U>::operator==(const TypeIntMirror& o) const {
  return TypeIntHelper::int_type_is_equal(*this, o);
}

template <class S, class U>
template <class T>
TypeIntMirror<S, U> TypeIntMirror<S, U>::cast() const {
  static_assert(std::is_same_v<T, TypeIntMirror>);
  return *this;
}

// The number of TypeIntMirror instances for integral types with a few bits. These values are
// calculated once and written down for usage in constexpr contexts.
template <class CTP>
static constexpr size_t all_instances_size() {
  using U = decltype(CTP::_ulo);
  constexpr juint max_unsigned = juint(std::numeric_limits<U>::max());
  if constexpr (max_unsigned == 1U) {
    // 1 bit
    return 3;
  } else if constexpr (max_unsigned == 3U) {
    // 2 bits
    return 15;
  } else if constexpr (max_unsigned == 7U) {
    // 3 bits
    return 134;
  } else {
    // 4 bits
    static_assert(max_unsigned == 15U);
    // For more than 4 bits, the number of instances is too large and it is not realistic to
    // compute all of them.
    return 1732;
  }
}

template <class CTP>
static std::array<CTP, all_instances_size<CTP>()> compute_all_instances() {
  using S = decltype(CTP::_lo);
  using U = decltype(CTP::_ulo);

  class CTPComparator {
  public:
    static RBTreeOrdering cmp(const CTP& x, const RBNode<CTP, int>* node) {
      // Quick helper for the tediousness below
      auto f = [](auto x, auto y) {
        assert(x != y, "we only handle lt and gt cases");
        return x < y ? RBTreeOrdering::LT : RBTreeOrdering::GT;
      };

      const CTP& y = node->key();
      if (x._lo != y._lo) {
        return f(x._lo, y._lo);
      } else if (x._hi != y._hi) {
        return f(x._hi, y._hi);
      } else if (x._ulo != y._ulo) {
        return f(x._ulo, y._ulo);
      } else if (x._uhi != y._uhi) {
        return f(x._uhi, y._uhi);
      } else if (x._bits._zeros != y._bits._zeros) {
        return f(x._bits._zeros, y._bits._zeros);
      } else if (x._bits._ones != y._bits._ones) {
        return f(x._bits._ones, y._bits._ones);
      } else {
        return RBTreeOrdering::EQ;
      }
    }
  };

  RBTreeCHeap<CTP, int, CTPComparator, MemTag::mtCompiler> collector;
  for (jint lo = jint(std::numeric_limits<S>::min()); lo <= jint(std::numeric_limits<S>::max()); lo++) {
    for (jint hi = lo; hi <= jint(std::numeric_limits<S>::max()); hi++) {
      for (juint ulo = 0; ulo <= juint(std::numeric_limits<U>::max()); ulo++) {
        for (juint uhi = ulo; uhi <= juint(std::numeric_limits<U>::max()); uhi++) {
          for (juint zeros = 0; zeros <= juint(std::numeric_limits<U>::max()); zeros++) {
            for (juint ones = 0; ones <= juint(std::numeric_limits<U>::max()); ones++) {
              TypeIntPrototype<S, U> t{{S(lo), S(hi)}, {U(ulo), U(uhi)}, {U(zeros), U(ones)}};
              auto canonicalized_t = t.canonicalize_constraints();
              if (canonicalized_t.empty()) {
                continue;
              }

              TypeIntPrototype<S, U> ct = canonicalized_t._data;
              collector.upsert(CTP{ct._srange._lo, ct._srange._hi, ct._urange._lo, ct._urange._hi, ct._bits}, 0);
            }
          }
        }
      }
    }
  }

  assert(collector.size() == all_instances_size<CTP>(), "unexpected size of all_instance, expected %d, actual %d", jint(all_instances_size<CTP>()), jint(collector.size()));
  std::array<CTP, all_instances_size<CTP>()> res;
  size_t idx = 0;
  collector.visit_in_order([&](RBNode<CTP, int>* node) {
    res[idx] = node->key();
    idx++;
    return true;
  });
  return res;
}

template <class CTP>
static const std::array<CTP, all_instances_size<CTP>()>& all_instances() {
  static std::array<CTP, all_instances_size<CTP>()> res = compute_all_instances<CTP>();
  static_assert(std::is_trivially_destructible_v<decltype(res)>);
  return res;
}

// Check the correctness, that is, if v1 is an element of input1, v2 is an element of input2, then
// op(v1, v2) must be an element of infer(input1, input2). This version does the check exhaustively
// on all elements of input1 and input2.
template <class InputType, class Operation, class Inference>
static void test_binary_instance_correctness_exhaustive(Operation op, Inference infer, const InputType& input1, const InputType& input2) {
  using S = std::remove_const_t<decltype(input1->_lo)>;
  using U = std::remove_const_t<decltype(input1->_ulo)>;
  InputType result = infer(input1, input2);

  for (juint v1 = juint(std::numeric_limits<U>::min()); v1 <= juint(std::numeric_limits<U>::max()); v1++) {
    if (!input1.contains(U(v1))) {
      continue;
    }

    for (juint v2 = juint(std::numeric_limits<U>::min()); v2 <= juint(std::numeric_limits<U>::max()); v2++) {
      if (!input2.contains(U(v2))) {
        continue;
      }

      U r = op(U(v1), U(v2));
      ASSERT_TRUE(result.contains(r));
    }
  }
}

// Check the correctness, that is, if v1 is an element of input1, v2 is an element of input2, then
// op(v1, v2) must be an element of infer(input1, input2). This version does the check randomly on
// a number of elements in input1 and input2.
template <class InputType, class Operation, class Inference>
static void test_binary_instance_correctness_samples(Operation op, Inference infer, const InputType& input1, const InputType& input2) {
  using U = std::remove_const_t<decltype(input1->_ulo)>;
  auto result = infer(input1, input2);

  constexpr size_t sample_count = 6;
  U input1_samples[sample_count] {U(input1._lo), U(input1._hi), input1._ulo, input1._uhi, input1._ulo, input1._ulo};
  U input2_samples[sample_count] {U(input2._lo), U(input2._hi), input2._ulo, input2._uhi, input2._ulo, input2._ulo};

  auto random_sample = [](U* samples, const InputType& input) {
    constexpr size_t max_tries = 100;
    constexpr size_t start_random_idx = 4;
    for (size_t tries = 0, idx = start_random_idx; tries < max_tries && idx < sample_count; tries++) {
      U n = uniform_random<U>();
      if (input.contains(n)) {
        samples[idx] = n;
        idx++;
      }
    }
  };
  random_sample(input1_samples, input1);
  random_sample(input2_samples, input2);

  for (size_t i = 0; i < sample_count; i++) {
    for (size_t j = 0; j < sample_count; j++) {
      U r = op(input1_samples[i], input2_samples[j]);
      ASSERT_TRUE(result.contains(r));
    }
  }
}

// Check the monotonicity, that is, if input1 is a subset of super1, input2 is a subset of super2,
// then infer(input1, input2) must be a subset of infer(super1, super2). This version does the
// check exhaustively on all supersets of input1 and input2.
template <class InputType, class Inference>
static void test_binary_instance_monotonicity_exhaustive(Inference infer, const InputType& input1, const InputType& input2) {
  InputType result = infer(input1, input2);

  for (const InputType& super1 : all_instances<InputType>()) {
    if (!super1.contains(input1) || super1 == input1) {
      continue;
    }

    for (const InputType& super2 : all_instances<InputType>()) {
      if (!super2.contains(input2) || super2 == input2) {
        continue;
      }

      ASSERT_TRUE(infer(input1, super2).contains(result));
      ASSERT_TRUE(infer(super1, input2).contains(result));
      ASSERT_TRUE(infer(super1, super2).contains(result));
    }
  }
}

// Check the monotonicity, that is, if input1 is a subset of super1, input2 is a subset of super2,
// then infer(input1, input2) must be a subset of infer(super1, super2). This version does the
// check randomly on a number of supersets of input1 and input2.
template <class InputType, class Inference>
static void test_binary_instance_monotonicity_samples(Inference infer, const InputType& input1, const InputType& input2) {
  using S = std::remove_const_t<decltype(input1->_lo)>;
  using U = std::remove_const_t<decltype(input1->_ulo)>;
  auto result = infer(input1, input2);

  // The set that is a superset of all other sets
  InputType universe = InputType{std::numeric_limits<S>::min(), std::numeric_limits<S>::max(), U(0), U(-1), {U(0), U(0)}};
  ASSERT_TRUE(infer(universe, input2).contains(result));
  ASSERT_TRUE(infer(input1, universe).contains(result));
  ASSERT_TRUE(infer(universe, universe).contains(result));

  auto random_superset = [](const InputType& input) {
    S lo = MIN2(input->_lo, S(uniform_random<U>()));
    S hi = MAX2(input->_hi, S(uniform_random<U>()));
    U ulo = MIN2(input->_ulo, uniform_random<U>());
    U uhi = MAX2(input->_uhi, uniform_random<U>());
    U zeros = input->_bits._zeros & uniform_random<U>();
    U ones = input->_bits._ones & uniform_random<U>();
    InputType super = InputType::make(TypeIntPrototype<S, U>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, 0);
    assert(super.contains(input), "impossible");
    return super;
  };

  InputType super1 = random_superset(input1);
  InputType super2 = random_superset(input2);
  ASSERT_TRUE(infer(super1, input2).contains(result));
  ASSERT_TRUE(infer(input1, super2).contains(result));
  ASSERT_TRUE(infer(super1, super2).contains(result));
}

// Verify the correctness and monotonicity of an inference function by exhaustively analyzing all
// instances of InputType
template <class InputType, class Operation, class Inference>
static void test_binary_exhaustive(Operation op, Inference infer) {
  for (const InputType& input1 : all_instances<InputType>()) {
    for (const InputType& input2 : all_instances<InputType>()) {
      test_binary_instance_correctness_exhaustive(op, infer, input1, input2);
      if (all_instances<InputType>().size() < 100) {
        // This effectively covers the cases up to uintn_t<2>
        test_binary_instance_monotonicity_exhaustive(infer, input1, input2);
      } else {
        // This effectively covers the cases of uintn_t<3>
        test_binary_instance_monotonicity_samples(infer, input1, input2);
      }
    }
  }
}

// Verify the correctness and monotonicity of an inference function by randomly sampling instances
// of InputType
template <class InputType, class Operation, class Inference>
static void test_binary_random(Operation op, Inference infer) {
  using S = std::remove_const_t<decltype(InputType::_lo)>;
  using U = std::remove_const_t<decltype(InputType::_ulo)>;

  constexpr size_t sample_count = 100;
  InputType samples[sample_count];

  // Fill with {0}
  for (size_t i = 0; i < sample_count; i++) {
    samples[i] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(0)}, {U(0), U(0)}, {U(0), U(0)}}, 0);
  }

  // {1}
  samples[1] = InputType::make(TypeIntPrototype<S, U>{{S(1), S(1)}, {U(1), U(1)}, {U(0), U(0)}}, 0);
  // {-1}
  samples[2] = InputType::make(TypeIntPrototype<S, U>{{S(-1), S(-1)}, {U(-1), U(-1)}, {U(0), U(0)}}, 0);
  // {0, 1}
  samples[3] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(1)}, {U(0), U(1)}, {U(0), U(0)}}, 0);
  // {-1, 0, 1}
  samples[4] = InputType::make(TypeIntPrototype<S, U>{{S(-1), S(1)}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
  // {-1, 1}
  samples[5] = InputType::make(TypeIntPrototype<S, U>{{S(-1), S(1)}, {U(1), U(-1)}, {U(0), U(0)}}, 0);
  // {0, 1, 2}
  samples[6] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(2)}, {U(0), U(2)}, {U(0), U(0)}}, 0);
  // {0, 2}
  samples[7] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(2)}, {U(0), U(2)}, {U(1), U(0)}}, 0);
  // [min_signed, max_signed]
  samples[8] = InputType::make(TypeIntPrototype<S, U>{{std::numeric_limits<S>::min(), std::numeric_limits<S>::max()}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
  // [0, max_signed]
  samples[9] = InputType::make(TypeIntPrototype<S, U>{{S(0), std::numeric_limits<S>::max()}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
  // [min_signed, 0)
  samples[10] = InputType::make(TypeIntPrototype<S, U>{{std::numeric_limits<S>::min(), S(-1)}, {U(0), U(-1)}, {U(0), U(0)}}, 0);

  constexpr size_t max_tries = 1000;
  constexpr size_t start_random_idx = 11;
  for (size_t tries = 0, idx = start_random_idx; tries < max_tries && idx < sample_count; tries++) {
    // Try to have lo < hi
    S signed_bound1 = S(uniform_random<U>());
    S signed_bound2 = S(uniform_random<U>());
    S lo = MIN2(signed_bound1, signed_bound2);
    S hi = MAX2(signed_bound1, signed_bound2);

    // Try to have ulo < uhi
    U unsigned_bound1 = uniform_random<U>();
    U unsigned_bound2 = uniform_random<U>();
    U ulo = MIN2(unsigned_bound1, unsigned_bound2);
    U uhi = MAX2(unsigned_bound1, unsigned_bound2);

    // Try to have (zeros & ones) == 0
    U zeros = uniform_random<U>();
    U ones = uniform_random<U>();
    U common = zeros & ones;
    zeros = zeros ^ common;
    ones = ones ^ common;

    TypeIntPrototype<S, U> t{{lo, hi}, {ulo, uhi}, {zeros, ones}};
    auto canonicalized_t = t.canonicalize_constraints();
    if (canonicalized_t.empty()) {
      continue;
    }

    samples[idx] = TypeIntMirror<S, U>{canonicalized_t._data._srange._lo, canonicalized_t._data._srange._hi,
                                       canonicalized_t._data._urange._lo, canonicalized_t._data._urange._hi,
                                       canonicalized_t._data._bits};
    idx++;
  }

  for (size_t i = 0; i < sample_count; i++) {
    for (size_t j = 0; j < sample_count; j++) {
      test_binary_instance_correctness_samples(op, infer, samples[i], samples[j]);
      test_binary_instance_monotonicity_samples(infer, samples[i], samples[j]);
    }
  }
}

template <template <class U> class Operation, template <class CTP> class Inference>
static void test_binary() {
  test_binary_exhaustive<TypeIntMirror<intn_t<1>, uintn_t<1>>>(Operation<uintn_t<1>>(), Inference<TypeIntMirror<intn_t<1>, uintn_t<1>>>());
  test_binary_exhaustive<TypeIntMirror<intn_t<2>, uintn_t<2>>>(Operation<uintn_t<2>>(), Inference<TypeIntMirror<intn_t<2>, uintn_t<2>>>());
  test_binary_exhaustive<TypeIntMirror<intn_t<3>, uintn_t<3>>>(Operation<uintn_t<3>>(), Inference<TypeIntMirror<intn_t<3>, uintn_t<3>>>());
  test_binary_random<TypeIntMirror<intn_t<4>, uintn_t<4>>>(Operation<uintn_t<4>>(), Inference<TypeIntMirror<intn_t<4>, uintn_t<4>>>());
  test_binary_random<TypeIntMirror<intn_t<5>, uintn_t<5>>>(Operation<uintn_t<5>>(), Inference<TypeIntMirror<intn_t<5>, uintn_t<5>>>());
  test_binary_random<TypeIntMirror<intn_t<6>, uintn_t<6>>>(Operation<uintn_t<6>>(), Inference<TypeIntMirror<intn_t<6>, uintn_t<6>>>());
  test_binary_random<TypeIntMirror<jint, juint>>(Operation<juint>(), Inference<TypeIntMirror<jint, juint>>());
  test_binary_random<TypeIntMirror<jlong, julong>>(Operation<julong>(), Inference<TypeIntMirror<jlong, julong>>());
}

template <class U>
class OpAnd {
public:
  U operator()(U v1, U v2) const {
    return v1 & v2;
  }
};

template <class CTP>
class InferAnd {
public:
  CTP operator()(CTP t1, CTP t2) const {
    return RangeInference::infer_and(t1, t2);
  }
};

template <class U>
class OpOr {
public:
  U operator()(U v1, U v2) const {
    return v1 | v2;
  }
};

template <class CTP>
class InferOr {
public:
  CTP operator()(CTP t1, CTP t2) const {
    return RangeInference::infer_or(t1, t2);
  }
};

template <class U>
class OpXor {
public:
  U operator()(U v1, U v2) const {
    return v1 ^ v2;
  }
};

template <class CTP>
class InferXor {
public:
  CTP operator()(CTP t1, CTP t2) const {
    return RangeInference::infer_xor(t1, t2);
  }
};

TEST(opto, range_inference) {
  test_binary<OpAnd, InferAnd>();
  test_binary<OpOr, InferOr>();
  test_binary<OpXor, InferXor>();
}

@@ -1,102 +0,0 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "unittest.hpp"
#include "opto/utilities/xor.hpp"
#include "utilities/globalDefinitions.hpp" // For jint, juint

jint test_calc_max(const jint hi_0, const jint hi_1) {
  return xor_upper_bound_for_ranges<jint, juint>(hi_0, hi_1);
}

jlong test_calc_max(const jlong hi_0, const jlong hi_1) {
  return xor_upper_bound_for_ranges<jlong, julong>(hi_0, hi_1);
}

template <class S>
void test_xor_bounds(S hi_0, S hi_1, S val_0, S val_1) {
  ASSERT_GE(hi_0, 0);
  ASSERT_GE(hi_1, 0);

  // Skip out-of-bounds values for convenience
  if (val_0 > hi_0 || val_0 < S(0) || val_1 > hi_1 || val_1 < S(0)) {
    return;
  }

  S v = val_0 ^ val_1;
  S max = test_calc_max(hi_0, hi_1);
  EXPECT_LE(v, max);
}

template <class S>
void test_sample_values(S hi_0, S hi_1) {
  for (S i = 0; i <= 3; i++) {
    for (S j = 0; j <= 3; j++) {
      // Some bit combinations near the low and high ends of the range
      test_xor_bounds(hi_0, hi_1, i, j);
      test_xor_bounds(hi_0, hi_1, hi_0 - i, hi_1 - j);
    }
  }
}

template <class S>
void test_in_ranges(S lo, S hi) {
  ASSERT_GE(lo, 0);
  ASSERT_LE(lo, hi);

  for (S hi_0 = lo; hi_0 <= hi; hi_0++) {
    for (S hi_1 = hi_0; hi_1 <= hi; hi_1++) {
      test_sample_values(hi_0, hi_1);
    }
  }
}

template <class S>
void test_exhaustive(S limit) {
  for (S hi_0 = 0; hi_0 <= limit; hi_0++) {
    for (S hi_1 = hi_0; hi_1 <= limit; hi_1++) {
      for (S val_0 = 0; val_0 <= hi_0; val_0++) {
        for (S val_1 = val_0; val_1 <= hi_1; val_1++) {
          test_xor_bounds(hi_0, hi_1, val_0, val_1);
        }
      }
    }
  }
}

template <class S>
void exec_tests() {
  S top_bit = max_power_of_2<S>();
  S prev_bit = top_bit >> 1;

  test_exhaustive<S>(15);

  test_in_ranges<S>(top_bit - 1, top_bit);
  test_in_ranges<S>(prev_bit - 1, prev_bit);
}

TEST_VM(opto, xor_max) {
  exec_tests<jint>();
  exec_tests<jlong>();
}