8346280: C2: implement late barrier elision for G1

Reviewed-by: tschatzl, aboldtch, mdoerr
Roberto Castañeda Lozano 2025-02-18 10:23:35 +00:00
parent d7baae3ee9
commit 8193e0d53a
9 changed files with 693 additions and 288 deletions

src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp

@ -529,8 +529,65 @@ int G1BarrierSetC2::get_store_barrier(C2Access& access) const {
return barriers;
}
void G1BarrierSetC2::elide_dominated_barrier(MachNode* mach) const {
uint8_t barrier_data = mach->barrier_data();
barrier_data &= ~G1C2BarrierPre;
if (CardTableBarrierSetC2::use_ReduceInitialCardMarks()) {
barrier_data &= ~G1C2BarrierPost;
barrier_data &= ~G1C2BarrierPostNotNull;
}
mach->set_barrier_data(barrier_data);
}
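
The rationale for the masking above: a store dominated by the allocation of its base object, with no safepoint poll in between, cannot overwrite a field value that G1's SATB marking snapshot needs, so the pre-barrier bit is always cleared; the post-barrier (card-mark) bits can only be cleared when ReduceInitialCardMarks already defers the card mark to allocation time. A minimal Java sketch of a qualifying and a non-qualifying pattern, in the style of the new IR tests further down (Outer and opaqueCall are illustrative):

```java
static class Outer { Object f; }

static Outer storeOnNewObject(Object x) {
    Outer o = new Outer(); // TLAB allocation dominates the store below
    o.f = x;               // no safepoint in between: pre-barrier elided, and
    return o;              // post-barrier too under -XX:+ReduceInitialCardMarks
}

static Outer storeOnNewObjectAfterCall(Object x) {
    Outer o = new Outer();
    opaqueCall();          // a call is a safepoint: both barriers are kept
    o.f = x;
    return o;
}

static void opaqueCall() { } // hypothetical stand-in for any non-inlined call
```
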
void G1BarrierSetC2::analyze_dominating_barriers() const {
ResourceMark rm;
PhaseCFG* const cfg = Compile::current()->cfg();
// Find allocations and memory accesses (stores and atomic operations), and
// track them in lists.
Node_List accesses;
Node_List allocations;
for (uint i = 0; i < cfg->number_of_blocks(); ++i) {
const Block* const block = cfg->get_block(i);
for (uint j = 0; j < block->number_of_nodes(); ++j) {
Node* const node = block->get_node(j);
if (node->is_Phi()) {
if (BarrierSetC2::is_allocation(node)) {
allocations.push(node);
}
continue;
} else if (!node->is_Mach()) {
continue;
}
MachNode* const mach = node->as_Mach();
switch (mach->ideal_Opcode()) {
case Op_StoreP:
case Op_StoreN:
case Op_CompareAndExchangeP:
case Op_CompareAndSwapP:
case Op_GetAndSetP:
case Op_CompareAndExchangeN:
case Op_CompareAndSwapN:
case Op_GetAndSetN:
if (mach->barrier_data() != 0) {
accesses.push(mach);
}
break;
default:
break;
}
}
}
// Find dominating allocations for each memory access (store or atomic
// operation) and elide barriers if there is no safepoint poll in between.
elide_dominated_barriers(accesses, allocations);
}
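
The candidate list gathers atomic operations (compare-and-swap, compare-and-exchange, get-and-set) alongside plain stores, so they are elidable under the same allocation-dominance conditions, as the new atomic IR tests at the end of this change verify. A minimal sketch of such a pattern:

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

class AtomicOnNewObject {
    static class Outer { Object f; }

    static final VarHandle F;
    static {
        try {
            F = MethodHandles.lookup().findVarHandle(Outer.class, "f", Object.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    // Compiles to a GetAndSetP/N machine node; its barrier data can be
    // cleared because the allocation dominates it without a safepoint poll.
    static Object getAndSetOnNewObject(Object oldVal, Object newVal) {
        Outer o = new Outer();
        o.f = oldVal;
        return F.getAndSet(o, newVal);
    }
}
```
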
void G1BarrierSetC2::late_barrier_analysis() const {
compute_liveness_at_stubs();
analyze_dominating_barriers();
}
void G1BarrierSetC2::emit_stubs(CodeBuffer& cb) const {

src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -86,6 +86,9 @@ public:
};
class G1BarrierSetC2: public CardTableBarrierSetC2 {
private:
void analyze_dominating_barriers() const;
protected:
bool g1_can_remove_pre_barrier(GraphKit* kit,
PhaseValues* phase,
@ -117,6 +120,7 @@ public:
ArrayCopyNode* ac) const;
virtual void* create_barrier_state(Arena* comp_arena) const;
virtual void emit_stubs(CodeBuffer& cb) const;
virtual void elide_dominated_barrier(MachNode* mach) const;
virtual void late_barrier_analysis() const;
#ifndef PRODUCT

src/hotspot/share/gc/shared/c2/barrierSetC2.cpp

@ -896,6 +896,262 @@ void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac
#undef XTOP
static bool block_has_safepoint(const Block* block, uint from, uint to) {
for (uint i = from; i < to; i++) {
if (block->get_node(i)->is_MachSafePoint()) {
// Safepoint found
return true;
}
}
// Safepoint not found
return false;
}
static bool block_has_safepoint(const Block* block) {
return block_has_safepoint(block, 0, block->number_of_nodes());
}
static uint block_index(const Block* block, const Node* node) {
for (uint j = 0; j < block->number_of_nodes(); ++j) {
if (block->get_node(j) == node) {
return j;
}
}
ShouldNotReachHere();
return 0;
}
// Look through various node aliases
static const Node* look_through_node(const Node* node) {
while (node != nullptr) {
const Node* new_node = node;
if (node->is_Mach()) {
const MachNode* const node_mach = node->as_Mach();
if (node_mach->ideal_Opcode() == Op_CheckCastPP) {
new_node = node->in(1);
}
if (node_mach->is_SpillCopy()) {
new_node = node->in(1);
}
}
if (new_node == node || new_node == nullptr) {
break;
} else {
node = new_node;
}
}
return node;
}
// Whether the given offset is undefined.
static bool is_undefined(intptr_t offset) {
return offset == Type::OffsetTop;
}
// Whether the given offset is unknown.
static bool is_unknown(intptr_t offset) {
return offset == Type::OffsetBot;
}
// Whether the given offset is concrete (defined and compile-time known).
static bool is_concrete(intptr_t offset) {
return !is_undefined(offset) && !is_unknown(offset);
}
// Compute base + offset components of the memory address accessed by mach.
// Return a node representing the base address, or null if the base cannot be
// found or the offset is undefined or a concrete negative value. If a non-null
// base is returned, the offset is a concrete, nonnegative value or unknown.
static const Node* get_base_and_offset(const MachNode* mach, intptr_t& offset) {
const TypePtr* adr_type = nullptr;
offset = 0;
const Node* base = mach->get_base_and_disp(offset, adr_type);
if (base == nullptr || base == NodeSentinel) {
return nullptr;
}
if (offset == 0 && base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_AddP) {
// The memory address is computed by 'base' and fed to 'mach' via an
// indirect memory operand (indicated by offset == 0). The ultimate base and
// offset can be fetched directly from the inputs and Ideal type of 'base'.
const TypeOopPtr* oopptr = base->bottom_type()->isa_oopptr();
if (oopptr == nullptr) return nullptr;
offset = oopptr->offset();
// Even if 'base' is not an Ideal AddP node anymore, Matcher::ReduceInst()
// guarantees that the base address is still available at the same slot.
base = base->in(AddPNode::Base);
assert(base != nullptr, "");
}
if (is_undefined(offset) || (is_concrete(offset) && offset < 0)) {
return nullptr;
}
return look_through_node(base);
}
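
In Java terms, the offset classification mirrors how the address is formed: a field access or an array access at a constant index yields a concrete offset, while an array access at a variable index yields an unknown (Type::OffsetBot) offset. A sketch:

```java
class OffsetKinds {
    Object f;

    static void examples(OffsetKinds o, Object[] a, Object x, int i) {
        o.f = x;  // concrete offset: the fixed field offset of f
        a[4] = x; // concrete offset: array base + 4 * element size
        a[i] = x; // unknown offset (OffsetBot): index is not a compile-time constant
    }
}
```
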
// Whether a phi node corresponds to an array allocation.
// This test is incomplete: in some edge cases, it might return false even
// though the node does correspond to an array allocation.
static bool is_array_allocation(const Node* phi) {
precond(phi->is_Phi());
// Check whether phi has a successor cast (CheckCastPP) to Java array pointer,
// possibly below spill copies and other cast nodes. Limit the exploration to
// a single path from the phi node consisting of these node types.
const Node* current = phi;
while (true) {
const Node* next = nullptr;
for (DUIterator_Fast imax, i = current->fast_outs(imax); i < imax; i++) {
if (!current->fast_out(i)->isa_Mach()) {
continue;
}
const MachNode* succ = current->fast_out(i)->as_Mach();
if (succ->ideal_Opcode() == Op_CheckCastPP) {
if (succ->get_ptr_type()->isa_aryptr()) {
// Cast to Java array pointer: phi corresponds to an array allocation.
return true;
}
// Other cast: record as candidate for further exploration.
next = succ;
} else if (succ->is_SpillCopy() && next == nullptr) {
// Spill copy, and no better candidate found: record as candidate.
next = succ;
}
}
if (next == nullptr) {
// No evidence found that phi corresponds to an array allocation, and no
// candidates available to continue exploring.
return false;
}
// Continue exploring from the best candidate found.
current = next;
}
ShouldNotReachHere();
}
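
The array test matters because an unknown-offset access can only be tied to a dominating allocation when that allocation is an array: a variable index may address different slots, but the range check guarantees all of them lie within the allocated object. A qualifying Java pattern, mirroring the new testStoreOnNewArrayAtUnknownIndex below:

```java
static Object[] storeOnNewArrayAtUnknownIndex(Object x, int i) {
    Object[] a = new Object[10]; // the allocation phi passes is_array_allocation()
    a[i] = x;                    // unknown offset, but provably within 'a',
    return a;                    // so the barriers can still be elided
}
```
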
bool BarrierSetC2::is_allocation(const Node* node) {
assert(node->is_Phi(), "expected phi node");
if (node->req() != 3) {
return false;
}
const Node* const fast_node = node->in(2);
if (!fast_node->is_Mach()) {
return false;
}
const MachNode* const fast_mach = fast_node->as_Mach();
if (fast_mach->ideal_Opcode() != Op_LoadP) {
return false;
}
intptr_t offset;
const Node* const base = get_base_and_offset(fast_mach, offset);
if (base == nullptr || !base->is_Mach() || !is_concrete(offset)) {
return false;
}
const MachNode* const base_mach = base->as_Mach();
if (base_mach->ideal_Opcode() != Op_ThreadLocal) {
return false;
}
return offset == in_bytes(Thread::tlab_top_offset());
}
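
is_allocation() recognizes the machine-level shape of an inlined TLAB allocation: a two-input phi whose second input is a LoadP of the thread-local tlab_top slot (the fast-path result), the other input coming from the slow-path runtime call. A rough Java model of that shape, with hypothetical ToyThread and slowPathAllocate names purely for illustration:

```java
class ToyAllocator {
    static class ToyThread { long tlabTop; long tlabEnd; }

    static long slowPathAllocate(long size) { return 0L; } // stands in for the runtime call

    static long allocate(ToyThread t, long size) {
        long top = t.tlabTop;      // LoadP of ThreadLocal + Thread::tlab_top_offset()
        long newTop = top + size;
        if (newTop <= t.tlabEnd) { // fast path: bump the TLAB top pointer
            t.tlabTop = newTop;
            return top;            // phi input 2: the loaded tlab_top is the new object
        }
        return slowPathAllocate(size); // phi input 1: runtime allocation
    }
}
```
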
void BarrierSetC2::elide_dominated_barriers(Node_List& accesses, Node_List& access_dominators) const {
Compile* const C = Compile::current();
PhaseCFG* const cfg = C->cfg();
for (uint i = 0; i < accesses.size(); i++) {
MachNode* const access = accesses.at(i)->as_Mach();
intptr_t access_offset;
const Node* const access_obj = get_base_and_offset(access, access_offset);
Block* const access_block = cfg->get_block_for_node(access);
const uint access_index = block_index(access_block, access);
if (access_obj == nullptr) {
// No information available
continue;
}
for (uint j = 0; j < access_dominators.size(); j++) {
const Node* const mem = access_dominators.at(j);
if (mem->is_Phi()) {
assert(is_allocation(mem), "expected allocation phi node");
if (mem != access_obj) {
continue;
}
if (is_unknown(access_offset) && !is_array_allocation(mem)) {
// The accessed address has an unknown offset, but the allocated
// object cannot be determined to be an array. Avoid eliding in this
// case, to be on the safe side.
continue;
}
assert((is_concrete(access_offset) && access_offset >= 0) || (is_unknown(access_offset) && is_array_allocation(mem)),
"candidate allocation-dominated access offsets must be either concrete and nonnegative, or unknown (for array allocations only)");
} else {
// Access node
const MachNode* const mem_mach = mem->as_Mach();
intptr_t mem_offset;
const Node* const mem_obj = get_base_and_offset(mem_mach, mem_offset);
if (mem_obj == nullptr ||
!is_concrete(access_offset) ||
!is_concrete(mem_offset)) {
// No information available
continue;
}
if (mem_obj != access_obj || mem_offset != access_offset) {
// Not the same addresses, not a candidate
continue;
}
assert(is_concrete(access_offset) && access_offset >= 0,
"candidate non-allocation-dominated access offsets must be concrete and nonnegative");
}
Block* mem_block = cfg->get_block_for_node(mem);
const uint mem_index = block_index(mem_block, mem);
if (access_block == mem_block) {
// Earlier accesses in the same block
if (mem_index < access_index && !block_has_safepoint(mem_block, mem_index + 1, access_index)) {
elide_dominated_barrier(access);
}
} else if (mem_block->dominates(access_block)) {
// Dominating block? Look around for safepoints
ResourceMark rm;
Block_List stack;
VectorSet visited;
stack.push(access_block);
bool safepoint_found = block_has_safepoint(access_block);
while (!safepoint_found && stack.size() > 0) {
const Block* const block = stack.pop();
if (visited.test_set(block->_pre_order)) {
continue;
}
if (block_has_safepoint(block)) {
safepoint_found = true;
break;
}
if (block == mem_block) {
continue;
}
// Push predecessor blocks
for (uint p = 1; p < block->num_preds(); ++p) {
Block* const pred = cfg->get_block_for_node(block->pred(p));
stack.push(pred);
}
}
if (!safepoint_found) {
elide_dominated_barrier(access);
}
}
}
}
}
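
In the cross-block case, the walk starts at the access block and explores predecessor blocks, stopping upward at the dominating block; any safepoint on the way defeats elision, since after a safepoint the object can no longer be assumed invisible to the collector. A standalone sketch of the same backward search over a toy CFG (hypothetical Block type; the real code operates on C2's Block and PhaseCFG, and conservatively scans the entire access and dominator blocks):

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class SafepointSearch {
    static class Block {
        final int id;
        final boolean hasSafepoint;
        final List<Block> preds = new ArrayList<>();
        Block(int id, boolean hasSafepoint) { this.id = id; this.hasSafepoint = hasSafepoint; }
    }

    // Whether some path from domBlock down to accessBlock may contain a
    // safepoint: walk predecessors from the access block, stopping at the
    // dominating block (nothing above the dominator needs inspection).
    static boolean safepointBetween(Block domBlock, Block accessBlock) {
        Deque<Block> stack = new ArrayDeque<>();
        Set<Integer> visited = new HashSet<>();
        stack.push(accessBlock);
        while (!stack.isEmpty()) {
            Block b = stack.pop();
            if (!visited.add(b.id)) continue; // already explored
            if (b.hasSafepoint) return true;  // elision is unsafe on this path
            if (b == domBlock) continue;      // do not walk above the dominator
            for (Block p : b.preds) stack.push(p);
        }
        return false; // no safepoint on any path: the barrier can be elided
    }
}
```
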
void BarrierSetC2::compute_liveness_at_stubs() const {
ResourceMark rm;
Compile* const C = Compile::current();

src/hotspot/share/gc/shared/c2/barrierSetC2.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -364,6 +364,14 @@ public:
virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; };
virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; }
// Whether the given phi node joins OOPs from fast and slow allocation paths.
static bool is_allocation(const Node* node);
// Elide GC barriers from a Mach node according to elide_dominated_barriers().
virtual void elide_dominated_barrier(MachNode* mach) const { }
// Elide GC barriers from instructions in 'accesses' if they are dominated by
// instructions in 'access_dominators' (according to elide_dominated_barrier())
// and there is no safepoint poll in between.
void elide_dominated_barriers(Node_List& accesses, Node_List& access_dominators) const;
virtual void late_barrier_analysis() const { }
virtual void compute_liveness_at_stubs() const;
virtual int estimate_stub_size() const { return 0; }

src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp

@ -120,7 +120,7 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
kit->final_sync(ideal);
}
bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() const {
bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() {
return ReduceInitialCardMarks;
}

src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.hpp

@ -41,7 +41,7 @@ public:
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, bool is_clone_instance, ArrayCopyPhase phase) const;
bool use_ReduceInitialCardMarks() const;
static bool use_ReduceInitialCardMarks();
};
#endif // SHARE_GC_SHARED_C2_CARDTABLEBARRIERSETC2_HPP

src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp

@ -28,7 +28,6 @@
#include "gc/z/zBarrierSetAssembler.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/addnode.hpp"
#include "opto/block.hpp"
#include "opto/compile.hpp"
#include "opto/graphKit.hpp"
@ -38,7 +37,6 @@
#include "opto/node.hpp"
#include "opto/output.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "utilities/debug.hpp"
@ -475,269 +473,10 @@ void ZBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* a
#undef XTOP
// == Dominating barrier elision ==
static bool block_has_safepoint(const Block* block, uint from, uint to) {
for (uint i = from; i < to; i++) {
if (block->get_node(i)->is_MachSafePoint()) {
// Safepoint found
return true;
}
}
// Safepoint not found
return false;
}
static bool block_has_safepoint(const Block* block) {
return block_has_safepoint(block, 0, block->number_of_nodes());
}
static uint block_index(const Block* block, const Node* node) {
for (uint j = 0; j < block->number_of_nodes(); ++j) {
if (block->get_node(j) == node) {
return j;
}
}
ShouldNotReachHere();
return 0;
}
// Look through various node aliases
static const Node* look_through_node(const Node* node) {
while (node != nullptr) {
const Node* new_node = node;
if (node->is_Mach()) {
const MachNode* const node_mach = node->as_Mach();
if (node_mach->ideal_Opcode() == Op_CheckCastPP) {
new_node = node->in(1);
}
if (node_mach->is_SpillCopy()) {
new_node = node->in(1);
}
}
if (new_node == node || new_node == nullptr) {
break;
} else {
node = new_node;
}
}
return node;
}
// Whether the given offset is undefined.
static bool is_undefined(intptr_t offset) {
return offset == Type::OffsetTop;
}
// Whether the given offset is unknown.
static bool is_unknown(intptr_t offset) {
return offset == Type::OffsetBot;
}
// Whether the given offset is concrete (defined and compile-time known).
static bool is_concrete(intptr_t offset) {
return !is_undefined(offset) && !is_unknown(offset);
}
// Compute base + offset components of the memory address accessed by mach.
// Return a node representing the base address, or null if the base cannot be
// found or the offset is undefined or a concrete negative value. If a non-null
// base is returned, the offset is a concrete, nonnegative value or unknown.
static const Node* get_base_and_offset(const MachNode* mach, intptr_t& offset) {
const TypePtr* adr_type = nullptr;
offset = 0;
const Node* base = mach->get_base_and_disp(offset, adr_type);
if (base == nullptr || base == NodeSentinel) {
return nullptr;
}
if (offset == 0 && base->is_Mach() && base->as_Mach()->ideal_Opcode() == Op_AddP) {
// The memory address is computed by 'base' and fed to 'mach' via an
// indirect memory operand (indicated by offset == 0). The ultimate base and
// offset can be fetched directly from the inputs and Ideal type of 'base'.
const TypeOopPtr* oopptr = base->bottom_type()->isa_oopptr();
if (oopptr == nullptr) return nullptr;
offset = oopptr->offset();
// Even if 'base' is not an Ideal AddP node anymore, Matcher::ReduceInst()
// guarantees that the base address is still available at the same slot.
base = base->in(AddPNode::Base);
assert(base != nullptr, "");
}
if (is_undefined(offset) || (is_concrete(offset) && offset < 0)) {
return nullptr;
}
return look_through_node(base);
}
// Whether a phi node corresponds to an array allocation.
// This test is incomplete: in some edge cases, it might return false even
// though the node does correspond to an array allocation.
static bool is_array_allocation(const Node* phi) {
precond(phi->is_Phi());
// Check whether phi has a successor cast (CheckCastPP) to Java array pointer,
// possibly below spill copies and other cast nodes. Limit the exploration to
// a single path from the phi node consisting of these node types.
const Node* current = phi;
while (true) {
const Node* next = nullptr;
for (DUIterator_Fast imax, i = current->fast_outs(imax); i < imax; i++) {
if (!current->fast_out(i)->isa_Mach()) {
continue;
}
const MachNode* succ = current->fast_out(i)->as_Mach();
if (succ->ideal_Opcode() == Op_CheckCastPP) {
if (succ->get_ptr_type()->isa_aryptr()) {
// Cast to Java array pointer: phi corresponds to an array allocation.
return true;
}
// Other cast: record as candidate for further exploration.
next = succ;
} else if (succ->is_SpillCopy() && next == nullptr) {
// Spill copy, and no better candidate found: record as candidate.
next = succ;
}
}
if (next == nullptr) {
// No evidence found that phi corresponds to an array allocation, and no
// candidates available to continue exploring.
return false;
}
// Continue exploring from the best candidate found.
current = next;
}
ShouldNotReachHere();
}
// Match the phi node that connects a TLAB allocation fast path with its slowpath
static bool is_allocation(const Node* node) {
if (node->req() != 3) {
return false;
}
const Node* const fast_node = node->in(2);
if (!fast_node->is_Mach()) {
return false;
}
const MachNode* const fast_mach = fast_node->as_Mach();
if (fast_mach->ideal_Opcode() != Op_LoadP) {
return false;
}
const TypePtr* const adr_type = nullptr;
intptr_t offset;
const Node* const base = get_base_and_offset(fast_mach, offset);
if (base == nullptr || !base->is_Mach() || !is_concrete(offset)) {
return false;
}
const MachNode* const base_mach = base->as_Mach();
if (base_mach->ideal_Opcode() != Op_ThreadLocal) {
return false;
}
return offset == in_bytes(Thread::tlab_top_offset());
}
static void elide_mach_barrier(MachNode* mach) {
void ZBarrierSetC2::elide_dominated_barrier(MachNode* mach) const {
mach->set_barrier_data(ZBarrierElided);
}
void ZBarrierSetC2::analyze_dominating_barriers_impl(Node_List& accesses, Node_List& access_dominators) const {
Compile* const C = Compile::current();
PhaseCFG* const cfg = C->cfg();
for (uint i = 0; i < accesses.size(); i++) {
MachNode* const access = accesses.at(i)->as_Mach();
intptr_t access_offset;
const Node* const access_obj = get_base_and_offset(access, access_offset);
Block* const access_block = cfg->get_block_for_node(access);
const uint access_index = block_index(access_block, access);
if (access_obj == nullptr) {
// No information available
continue;
}
for (uint j = 0; j < access_dominators.size(); j++) {
const Node* const mem = access_dominators.at(j);
if (mem->is_Phi()) {
// Allocation node
if (mem != access_obj) {
continue;
}
if (is_unknown(access_offset) && !is_array_allocation(mem)) {
// The accessed address has an unknown offset, but the allocated
// object cannot be determined to be an array. Avoid eliding in this
// case, to be on the safe side.
continue;
}
assert((is_concrete(access_offset) && access_offset >= 0) || (is_unknown(access_offset) && is_array_allocation(mem)),
"candidate allocation-dominated access offsets must be either concrete and nonnegative, or unknown (for array allocations only)");
} else {
// Access node
const MachNode* const mem_mach = mem->as_Mach();
intptr_t mem_offset;
const Node* const mem_obj = get_base_and_offset(mem_mach, mem_offset);
if (mem_obj == nullptr ||
!is_concrete(access_offset) ||
!is_concrete(mem_offset)) {
// No information available
continue;
}
if (mem_obj != access_obj || mem_offset != access_offset) {
// Not the same addresses, not a candidate
continue;
}
assert(is_concrete(access_offset) && access_offset >= 0,
"candidate non-allocation-dominated access offsets must be concrete and nonnegative");
}
Block* mem_block = cfg->get_block_for_node(mem);
const uint mem_index = block_index(mem_block, mem);
if (access_block == mem_block) {
// Earlier accesses in the same block
if (mem_index < access_index && !block_has_safepoint(mem_block, mem_index + 1, access_index)) {
elide_mach_barrier(access);
}
} else if (mem_block->dominates(access_block)) {
// Dominating block? Look around for safepoints
ResourceMark rm;
Block_List stack;
VectorSet visited;
stack.push(access_block);
bool safepoint_found = block_has_safepoint(access_block);
while (!safepoint_found && stack.size() > 0) {
const Block* const block = stack.pop();
if (visited.test_set(block->_pre_order)) {
continue;
}
if (block_has_safepoint(block)) {
safepoint_found = true;
break;
}
if (block == mem_block) {
continue;
}
// Push predecessor blocks
for (uint p = 1; p < block->num_preds(); ++p) {
Block* const pred = cfg->get_block_for_node(block->pred(p));
stack.push(pred);
}
}
if (!safepoint_found) {
elide_mach_barrier(access);
}
}
}
}
}
void ZBarrierSetC2::analyze_dominating_barriers() const {
ResourceMark rm;
Compile* const C = Compile::current();
@ -807,9 +546,9 @@ void ZBarrierSetC2::analyze_dominating_barriers() const {
}
// Step 2 - Find dominating accesses or allocations for each access
analyze_dominating_barriers_impl(loads, load_dominators);
analyze_dominating_barriers_impl(stores, store_dominators);
analyze_dominating_barriers_impl(atomics, atomic_dominators);
elide_dominated_barriers(loads, load_dominators);
elide_dominated_barriers(stores, store_dominators);
elide_dominated_barriers(atomics, atomic_dominators);
}
void ZBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {

src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -99,7 +99,6 @@ public:
class ZBarrierSetC2 : public BarrierSetC2 {
private:
void analyze_dominating_barriers_impl(Node_List& accesses, Node_List& access_dominators) const;
void analyze_dominating_barriers() const;
protected:
@ -128,6 +127,7 @@ public:
virtual void clone_at_expansion(PhaseMacroExpand* phase,
ArrayCopyNode* ac) const;
virtual void elide_dominated_barrier(MachNode* mach) const;
virtual void late_barrier_analysis() const;
virtual int estimate_stub_size() const;
virtual void emit_stubs(CodeBuffer& cb) const;

test/hotspot/jtreg/compiler/gcbarriers/TestG1BarrierGeneration.java

@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,6 +47,7 @@ public class TestG1BarrierGeneration {
static final String POST_ONLY_NOT_NULL = "post notnull";
static final String PRE_AND_POST = "pre post";
static final String PRE_AND_POST_NOT_NULL = "pre post notnull";
static final String ANY = ".*";
static class Outer {
Object f;
@ -90,6 +91,9 @@ public class TestG1BarrierGeneration {
}
}
@DontInline
static void nonInlinedMethod() {}
public static void main(String[] args) {
TestFramework framework = new TestFramework();
Scenario[] scenarios = new Scenario[2*2];
@ -194,10 +198,11 @@ public class TestG1BarrierGeneration {
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N},
failOn = {IRNode.G1_STORE_N_WITH_BARRIER_FLAG, ANY,
IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Outer testStoreOnNewObject(Object o1) {
Outer o = new Outer();
@ -222,10 +227,11 @@ public class TestG1BarrierGeneration {
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY_NOT_NULL, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N},
failOn = {IRNode.G1_STORE_N_WITH_BARRIER_FLAG, ANY,
IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Outer testStoreNotNullOnNewObject(Object o1) {
if (o1.hashCode() == 42) {
@ -244,10 +250,11 @@ public class TestG1BarrierGeneration {
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "2"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N},
failOn = {IRNode.G1_STORE_N_WITH_BARRIER_FLAG, ANY,
IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Outer testStoreOnNewObjectInTwoPaths(Object o1, boolean c) {
Outer o;
@ -261,6 +268,63 @@ public class TestG1BarrierGeneration {
return o;
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Outer testStoreConditionallyOnNewObject(Object o1, boolean c) {
Outer o = new Outer();
if (c) {
o.f = o1;
}
return o;
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Outer testStoreOnNewObjectAfterException(Object o1, boolean c) throws Exception {
Outer o = new Outer();
if (c) {
throw new Exception("");
}
o.f = o1;
return o;
}
@Test
@IR(applyIf = {"UseCompressedOops", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, PRE_AND_POST, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIf = {"UseCompressedOops", "true"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, PRE_AND_POST, "1"},
phase = CompilePhase.FINAL_CODE)
public static Outer testStoreOnNewObjectAfterCall(Object o1) {
Outer o = new Outer();
nonInlinedMethod();
o.f = o1;
return o;
}
@Run(test = {"testStore",
"testStoreNull",
"testStoreObfuscatedNull",
@ -270,7 +334,10 @@ public class TestG1BarrierGeneration {
"testStoreOnNewObject",
"testStoreNullOnNewObject",
"testStoreNotNullOnNewObject",
"testStoreOnNewObjectInTwoPaths"})
"testStoreOnNewObjectInTwoPaths",
"testStoreConditionallyOnNewObject",
"testStoreOnNewObjectAfterException",
"testStoreOnNewObjectAfterCall"})
public void runStoreTests() {
{
Outer o = new Outer();
@ -328,6 +395,24 @@ public class TestG1BarrierGeneration {
Outer o = testStoreOnNewObjectInTwoPaths(o1, ThreadLocalRandom.current().nextBoolean());
Asserts.assertEquals(o1, o.f);
}
{
Object o1 = new Object();
boolean c = ThreadLocalRandom.current().nextBoolean();
Outer o = testStoreConditionallyOnNewObject(o1, c);
Asserts.assertTrue(o.f == (c ? o1 : null));
}
{
Object o1 = new Object();
boolean c = ThreadLocalRandom.current().nextBoolean();
try {
Outer o = testStoreOnNewObjectAfterException(o1, c);
} catch (Exception e) {}
}
{
Object o1 = new Object();
Outer o = testStoreOnNewObjectAfterCall(o1);
Asserts.assertEquals(o1, o.f);
}
}
@Test
@ -379,25 +464,91 @@ public class TestG1BarrierGeneration {
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N},
failOn = {IRNode.G1_STORE_N_WITH_BARRIER_FLAG, ANY,
IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Object[] testStoreOnNewArray(Object o1) {
public static Object[] testStoreOnNewArrayAtKnownIndex(Object o1) {
Object[] a = new Object[10];
// The index needs to be concrete for C2 to detect that it is safe to
// remove the pre-barrier.
a[4] = o1;
return a;
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N_WITH_BARRIER_FLAG, ANY,
IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Object[] testStoreOnNewArrayAtUnknownIndex(Object o1, int index) {
Object[] a = new Object[10];
a[index] = o1;
return a;
}
@Test
@IR(failOn = IRNode.SAFEPOINT)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
public static Object[] testStoreAllOnNewSmallArray(Object o1) {
Object[] a = new Object[64];
for (int i = 0; i < a.length; i++) {
a[i] = o1;
}
return a;
}
@Test
@IR(counts = {IRNode.SAFEPOINT, "1"})
@IR(applyIf = {"UseCompressedOops", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, PRE_AND_POST, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIf = {"UseCompressedOops", "true"},
counts = {IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, PRE_AND_POST, "1"},
phase = CompilePhase.FINAL_CODE)
public static Object[] testStoreAllOnNewLargeArray(Object o1) {
Object[] a = new Object[1024];
for (int i = 0; i < a.length; i++) {
a[i] = o1;
}
return a;
}
@Run(test = {"testArrayStore",
"testArrayStoreNull",
"testArrayStoreNotNull",
"testArrayStoreTwice",
"testStoreOnNewArray"})
"testStoreOnNewArrayAtKnownIndex",
"testStoreOnNewArrayAtUnknownIndex",
"testStoreAllOnNewSmallArray",
"testStoreAllOnNewLargeArray"})
public void runArrayStoreTests() {
{
Object[] a = new Object[10];
@ -426,9 +577,28 @@ public class TestG1BarrierGeneration {
}
{
Object o1 = new Object();
Object[] a = testStoreOnNewArray(o1);
Object[] a = testStoreOnNewArrayAtKnownIndex(o1);
Asserts.assertEquals(o1, a[4]);
}
{
Object o1 = new Object();
Object[] a = testStoreOnNewArrayAtUnknownIndex(o1, 5);
Asserts.assertEquals(o1, a[5]);
}
{
Object o1 = new Object();
Object[] a = testStoreAllOnNewSmallArray(o1);
for (int i = 0; i < a.length; i++) {
Asserts.assertEquals(o1, a[i]);
}
}
{
Object o1 = new Object();
Object[] a = testStoreAllOnNewLargeArray(o1);
for (int i = 0; i < a.length; i++) {
Asserts.assertEquals(o1, a[i]);
}
}
}
@Test
@ -442,7 +612,9 @@ public class TestG1BarrierGeneration {
@Test
@IR(applyIf = {"ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_STORE_P, IRNode.G1_STORE_N, IRNode.G1_ENCODE_P_AND_STORE_N},
failOn = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, ANY,
IRNode.G1_STORE_N_WITH_BARRIER_FLAG, ANY,
IRNode.G1_ENCODE_P_AND_STORE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"ReduceInitialCardMarks", "false", "UseCompressedOops", "false"},
counts = {IRNode.G1_STORE_P_WITH_BARRIER_FLAG, POST_ONLY, "2"},
@ -565,9 +737,139 @@ public class TestG1BarrierGeneration {
return fVarHandle.getAndSet(o, newVal);
}
// IR checks are disabled for s390 because barriers are not elided (to be investigated).
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
applyIfPlatform = {"s390", "false"},
counts = {IRNode.G1_COMPARE_AND_EXCHANGE_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
applyIfPlatform = {"s390", "false"},
counts = {IRNode.G1_COMPARE_AND_EXCHANGE_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
applyIfPlatform = {"s390", "false"},
failOn = {IRNode.G1_COMPARE_AND_EXCHANGE_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
applyIfPlatform = {"s390", "false"},
failOn = {IRNode.G1_COMPARE_AND_EXCHANGE_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
static Object testCompareAndExchangeOnNewObject(Object oldVal, Object newVal) {
Outer o = new Outer();
o.f = oldVal;
return fVarHandle.compareAndExchange(o, oldVal, newVal);
}
// IR checks are disabled for s390 when OOPs compression is disabled
// because barriers are not elided in this configuration (to be investigated).
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
applyIfPlatform = {"s390", "false"},
counts = {IRNode.G1_COMPARE_AND_SWAP_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_COMPARE_AND_SWAP_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
applyIfPlatform = {"s390", "false"},
failOn = {IRNode.G1_COMPARE_AND_SWAP_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_COMPARE_AND_SWAP_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
static boolean testCompareAndSwapOnNewObject(Object oldVal, Object newVal) {
Outer o = new Outer();
o.f = oldVal;
return fVarHandle.compareAndSet(o, oldVal, newVal);
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
static Object testGetAndSetOnNewObject(Object oldVal, Object newVal) {
Outer o = new Outer();
o.f = oldVal;
return fVarHandle.getAndSet(o, newVal);
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
static Object testGetAndSetConditionallyOnNewObject(Object oldVal, Object newVal, boolean c) {
Outer o = new Outer();
o.f = oldVal;
if (c) {
return fVarHandle.getAndSet(o, newVal);
}
return oldVal;
}
@Test
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "false"},
counts = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, POST_ONLY, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "false", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
@IR(applyIfAnd = {"UseCompressedOops", "true", "ReduceInitialCardMarks", "true"},
failOn = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, ANY},
phase = CompilePhase.FINAL_CODE)
static Object testGetAndSetOnNewObjectAfterException(Object oldVal, Object newVal, boolean c) throws Exception {
Outer o = new Outer();
if (c) {
throw new Exception("");
}
o.f = oldVal;
return fVarHandle.getAndSet(o, newVal);
}
@Test
@IR(applyIf = {"UseCompressedOops", "false"},
counts = {IRNode.G1_GET_AND_SET_P_WITH_BARRIER_FLAG, PRE_AND_POST, "1"},
phase = CompilePhase.FINAL_CODE)
@IR(applyIf = {"UseCompressedOops", "true"},
counts = {IRNode.G1_GET_AND_SET_N_WITH_BARRIER_FLAG, PRE_AND_POST, "1"},
phase = CompilePhase.FINAL_CODE)
static Object testGetAndSetOnNewObjectAfterCall(Object oldVal, Object newVal) {
Outer o = new Outer();
nonInlinedMethod();
o.f = oldVal;
return fVarHandle.getAndSet(o, newVal);
}
@Run(test = {"testCompareAndExchange",
"testCompareAndSwap",
"testGetAndSet"})
"testGetAndSet",
"testCompareAndExchangeOnNewObject",
"testCompareAndSwapOnNewObject",
"testGetAndSetOnNewObject",
"testGetAndSetConditionallyOnNewObject",
"testGetAndSetOnNewObjectAfterException",
"testGetAndSetOnNewObjectAfterCall"})
public void runAtomicTests() {
{
Outer o = new Outer();
@ -596,6 +898,45 @@ public class TestG1BarrierGeneration {
Asserts.assertEquals(oldVal, oldVal2);
Asserts.assertEquals(o.f, newVal);
}
{
Object oldVal = new Object();
Object newVal = new Object();
Object oldVal2 = testCompareAndExchangeOnNewObject(oldVal, newVal);
Asserts.assertEquals(oldVal, oldVal2);
}
{
Object oldVal = new Object();
Object newVal = new Object();
boolean b = testCompareAndSwapOnNewObject(oldVal, newVal);
Asserts.assertTrue(b);
}
{
Object oldVal = new Object();
Object newVal = new Object();
Object oldVal2 = testGetAndSetOnNewObject(oldVal, newVal);
Asserts.assertEquals(oldVal, oldVal2);
}
{
Object oldVal = new Object();
Object newVal = new Object();
boolean c = ThreadLocalRandom.current().nextBoolean();
Object oldVal2 = testGetAndSetConditionallyOnNewObject(oldVal, newVal, c);
Asserts.assertEquals(oldVal, oldVal2);
}
{
Object oldVal = new Object();
Object newVal = new Object();
boolean c = ThreadLocalRandom.current().nextBoolean();
try {
Object oldVal2 = testGetAndSetOnNewObjectAfterException(oldVal, newVal, c);
} catch (Exception e) {}
}
{
Object oldVal = new Object();
Object newVal = new Object();
Object oldVal2 = testGetAndSetOnNewObjectAfterCall(oldVal, newVal);
Asserts.assertEquals(oldVal, oldVal2);
}
}
@Test