8350565: NMT: remaining memory flag/type to be replaced with memory tag

Reviewed-by: gziemski, jsjolen
This commit is contained in:
Afshin Zafari 2025-03-07 09:06:12 +00:00
parent 7314efc948
commit 4066f33a0b
17 changed files with 77 additions and 77 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -145,7 +145,7 @@ class MallocMemory {
class MallocMemorySummary;
// A snapshot of malloc'd memory, includes malloc memory
// usage by types and memory used by tracking itself.
// usage by tags and memory used by tracking itself.
class MallocMemorySnapshot {
friend class MallocMemorySummary;
@@ -155,12 +155,12 @@ class MallocMemorySnapshot {
public:
inline MallocMemory* by_type(MemTag mem_tag) {
inline MallocMemory* by_tag(MemTag mem_tag) {
int index = NMTUtil::tag_to_index(mem_tag);
return &_malloc[index];
}
inline const MallocMemory* by_type(MemTag mem_tag) const {
inline const MallocMemory* by_tag(MemTag mem_tag) const {
int index = NMTUtil::tag_to_index(mem_tag);
return &_malloc[index];
}
@@ -220,25 +220,25 @@ class MallocMemorySummary : AllStatic {
static void initialize();
static inline void record_malloc(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_malloc(size);
as_snapshot()->by_tag(mem_tag)->record_malloc(size);
as_snapshot()->_all_mallocs.allocate(size);
}
static inline void record_free(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_free(size);
as_snapshot()->by_tag(mem_tag)->record_free(size);
as_snapshot()->_all_mallocs.deallocate(size);
}
static inline void record_new_arena(MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_new_arena();
as_snapshot()->by_tag(mem_tag)->record_new_arena();
}
static inline void record_arena_free(MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_arena_free();
as_snapshot()->by_tag(mem_tag)->record_arena_free();
}
static inline void record_arena_size_change(ssize_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->record_arena_size_change(size);
as_snapshot()->by_tag(mem_tag)->record_arena_size_change(size);
}
static void snapshot(MallocMemorySnapshot* s) {

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2023 SAP SE. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@ inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MemTag mem_tag) {
// Category Limit?
l = MallocLimitHandler::category_limit(mem_tag);
if (l->sz > 0) {
const MallocMemory* mm = as_snapshot()->by_type(mem_tag);
const MallocMemory* mm = as_snapshot()->by_tag(mem_tag);
size_t so_far = mm->malloc_size() + mm->arena_size();
if ((so_far + s) > l->sz) {
return category_limit_reached(mem_tag, s, so_far, l);

View File

@@ -61,7 +61,7 @@ int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
}
// Sort into allocation site addresses and memory tag order for baseline comparison
int compare_malloc_site_and_type(const MallocSite& s1, const MallocSite& s2) {
int compare_malloc_site_and_tag(const MallocSite& s1, const MallocSite& s2) {
int res = compare_malloc_site(s1, s2);
if (res == 0) {
res = (int)(NMTUtil::tag_to_index(s1.mem_tag()) - NMTUtil::tag_to_index(s2.mem_tag()));
@@ -231,8 +231,8 @@ MallocSiteIterator MemBaseline::malloc_sites(SortingOrder order) {
case by_site:
malloc_sites_to_allocation_site_order();
break;
case by_site_and_type:
malloc_sites_to_allocation_site_and_type_order();
case by_site_and_tag:
malloc_sites_to_allocation_site_and_tag_order();
break;
case by_address:
default:
@@ -272,7 +272,7 @@ void MemBaseline::malloc_sites_to_size_order() {
}
void MemBaseline::malloc_sites_to_allocation_site_order() {
if (_malloc_sites_order != by_site && _malloc_sites_order != by_site_and_type) {
if (_malloc_sites_order != by_site && _malloc_sites_order != by_site_and_tag) {
SortedLinkedList<MallocSite, compare_malloc_site> tmp;
// Add malloc sites to sorted linked list to sort into site (address) order
tmp.move(&_malloc_sites);
@@ -282,14 +282,14 @@ void MemBaseline::malloc_sites_to_allocation_site_order() {
}
}
void MemBaseline::malloc_sites_to_allocation_site_and_type_order() {
if (_malloc_sites_order != by_site_and_type) {
SortedLinkedList<MallocSite, compare_malloc_site_and_type> tmp;
void MemBaseline::malloc_sites_to_allocation_site_and_tag_order() {
if (_malloc_sites_order != by_site_and_tag) {
SortedLinkedList<MallocSite, compare_malloc_site_and_tag> tmp;
// Add malloc sites to sorted linked list to sort into site (address) order
tmp.move(&_malloc_sites);
_malloc_sites.set_head(tmp.head());
tmp.set_head(nullptr);
_malloc_sites_order = by_site_and_type;
_malloc_sites_order = by_site_and_tag;
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@ class MemBaseline {
by_address, // by memory address
by_size, // by memory size
by_site, // by call site where the memory is allocated from
by_site_and_type // by call site and memory tag
by_site_and_tag // by call site and memory tag
};
private:
@@ -146,12 +146,12 @@ class MemBaseline {
MallocMemory* malloc_memory(MemTag mem_tag) {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _malloc_memory_snapshot.by_type(mem_tag);
return _malloc_memory_snapshot.by_tag(mem_tag);
}
VirtualMemory* virtual_memory(MemTag mem_tag) {
assert(baseline_type() != Not_baselined, "Not yet baselined");
return _virtual_memory_snapshot.by_type(mem_tag);
return _virtual_memory_snapshot.by_tag(mem_tag);
}
@@ -204,7 +204,7 @@ class MemBaseline {
// Sort allocation sites in call site address order
void malloc_sites_to_allocation_site_order();
// Sort allocation sites in call site address and memory tag order
void malloc_sites_to_allocation_site_and_type_order();
void malloc_sites_to_allocation_site_and_tag_order();
// Sort allocation sites in reserved size order
void virtual_memory_sites_to_size_order();

View File

@@ -79,7 +79,7 @@ void MemReporterBase::print_malloc(const MemoryCounter* c, MemTag mem_tag) const
const size_t count = c->count();
if (mem_tag != mtNone) {
out->print("(%s%zu%s type=%s", alloc_type,
out->print("(%s%zu%s tag=%s", alloc_type,
amount_in_current_scale(amount), scale, NMTUtil::tag_to_name(mem_tag));
} else {
out->print("(%s%zu%s", alloc_type,
@@ -181,14 +181,14 @@ void MemSummaryReporter::report() {
MemTag mem_tag = NMTUtil::index_to_tag(index);
// thread stack is reported as part of thread category
if (mem_tag == mtThreadStack) continue;
MallocMemory* malloc_memory = _malloc_snapshot->by_type(mem_tag);
VirtualMemory* virtual_memory = _vm_snapshot->by_type(mem_tag);
MallocMemory* malloc_memory = _malloc_snapshot->by_tag(mem_tag);
VirtualMemory* virtual_memory = _vm_snapshot->by_tag(mem_tag);
report_summary_of_type(mem_tag, malloc_memory, virtual_memory);
report_summary_of_tag(mem_tag, malloc_memory, virtual_memory);
}
}
void MemSummaryReporter::report_summary_of_type(MemTag mem_tag,
void MemSummaryReporter::report_summary_of_tag(MemTag mem_tag,
MallocMemory* malloc_memory, VirtualMemory* virtual_memory) {
size_t reserved_amount = reserved_total (malloc_memory, virtual_memory);
@@ -197,7 +197,7 @@ void MemSummaryReporter::report_summary_of_type(MemTag mem_tag,
// Count thread's native stack in "Thread" category
if (mem_tag == mtThread) {
const VirtualMemory* thread_stack_usage =
(const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
(const VirtualMemory*)_vm_snapshot->by_tag(mtThreadStack);
reserved_amount += thread_stack_usage->reserved();
committed_amount += thread_stack_usage->committed();
} else if (mem_tag == mtNMT) {
@@ -239,7 +239,7 @@ void MemSummaryReporter::report_summary_of_type(MemTag mem_tag,
_instance_class_count, _array_class_count);
} else if (mem_tag == mtThread) {
const VirtualMemory* thread_stack_usage =
_vm_snapshot->by_type(mtThreadStack);
_vm_snapshot->by_tag(mtThreadStack);
// report thread count
out->print_cr("(threads #%zu)", ThreadStackTracker::thread_count());
out->print("(stack: ");
@@ -380,7 +380,7 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
const MemTag mem_tag = virtual_memory_site->mem_tag();
if (mem_tag != mtNone) {
out->print(" Type=%s", NMTUtil::tag_to_name(mem_tag));
out->print(" Tag=%s", NMTUtil::tag_to_name(mem_tag));
}
out->print_cr(")");
)
@@ -524,7 +524,7 @@ void MemSummaryDiffReporter::report_diff() {
MemTag mem_tag = NMTUtil::index_to_tag(index);
// thread stack is reported as part of thread category
if (mem_tag == mtThreadStack) continue;
diff_summary_of_type(mem_tag,
diff_summary_of_tag(mem_tag,
_early_baseline.malloc_memory(mem_tag),
_early_baseline.virtual_memory(mem_tag),
_early_baseline.metaspace_stats(),
@@ -594,7 +594,7 @@ void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved,
}
void MemSummaryDiffReporter::diff_summary_of_type(MemTag mem_tag,
void MemSummaryDiffReporter::diff_summary_of_tag(MemTag mem_tag,
const MallocMemory* early_malloc, const VirtualMemory* early_vm,
const MetaspaceCombinedStats& early_ms,
const MallocMemory* current_malloc, const VirtualMemory* current_vm,
@@ -795,8 +795,8 @@ void MemDetailDiffReporter::report_diff() {
}
void MemDetailDiffReporter::diff_malloc_sites() const {
MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_type);
MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_type);
MallocSiteIterator early_itr = _early_baseline.malloc_sites(MemBaseline::by_site_and_tag);
MallocSiteIterator current_itr = _current_baseline.malloc_sites(MemBaseline::by_site_and_tag);
const MallocSite* early_site = early_itr.next();
const MallocSite* current_site = current_itr.next();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -139,7 +139,7 @@ class MemSummaryReporter : public MemReporterBase {
virtual void report();
private:
// Report summary for each memory tag
void report_summary_of_type(MemTag mem_tag, MallocMemory* malloc_memory,
void report_summary_of_tag(MemTag mem_tag, MallocMemory* malloc_memory,
VirtualMemory* virtual_memory);
void report_metadata(Metaspace::MetadataType type) const;
@@ -204,7 +204,7 @@ class MemSummaryDiffReporter : public MemReporterBase {
private:
// report the comparison of each mem_tag
void diff_summary_of_type(MemTag mem_tag,
void diff_summary_of_tag(MemTag mem_tag,
const MallocMemory* early_malloc, const VirtualMemory* early_vm,
const MetaspaceCombinedStats& early_ms,
const MallocMemory* current_malloc, const VirtualMemory* current_vm,

View File

@@ -62,7 +62,7 @@ void MemTracker::initialize() {
assert(level == NMT_off || level == NMT_summary || level == NMT_detail,
"Invalid setting for NativeMemoryTracking (%s)", NativeMemoryTracking);
// Memory type is encoded into tracking header as a byte field,
// Memory tag is encoded into tracking header as a byte field,
// make sure that we don't overflow it.
STATIC_ASSERT(mt_number_of_tags <= max_jubyte);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -210,7 +210,7 @@ class MemTracker : AllStatic {
// be fully uncommitted.
//
// The two new memory regions will be both registered under stack and
// memory flags of the original region.
// memory tags of the original region.
static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {
assert_post_init();
if (!enabled()) return;
@@ -225,7 +225,7 @@
if (!enabled()) return;
if (addr != nullptr) {
NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::set_reserved_region_type((address)addr, mem_tag);
VirtualMemoryTracker::set_reserved_region_tag((address)addr, mem_tag);
}
}

View File

@@ -44,7 +44,7 @@ void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
VMATree::RegionData regiondata(sidx, mem_tag);
VMATree::SummaryDiff diff = file->_tree.commit_mapping(offset, size, regiondata);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_tag(i));
VirtualMemory* summary = file->_summary.by_tag(NMTUtil::index_to_tag(i));
summary->reserve_memory(diff.tag[i].commit);
summary->commit_memory(diff.tag[i].commit);
}
@@ -53,7 +53,7 @@ void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
void MemoryFileTracker::free_memory(MemoryFile* file, size_t offset, size_t size) {
VMATree::SummaryDiff diff = file->_tree.release_mapping(offset, size);
for (int i = 0; i < mt_number_of_tags; i++) {
VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_tag(i));
VirtualMemory* summary = file->_summary.by_tag(NMTUtil::index_to_tag(i));
summary->reserve_memory(diff.tag[i].commit);
summary->commit_memory(diff.tag[i].commit);
}
@@ -176,7 +176,7 @@ const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& MemoryFileTrack
void MemoryFileTracker::summary_snapshot(VirtualMemorySnapshot* snapshot) const {
iterate_summary([&](MemTag tag, const VirtualMemory* current) {
VirtualMemory* snap = snapshot->by_type(tag);
VirtualMemory* snap = snapshot->by_tag(tag);
// Only account the committed memory.
snap->commit_memory(current->committed());
});

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ public:
for (int d = 0; d < _files.length(); d++) {
const MemoryFile* file = _files.at(d);
for (int i = 0; i < mt_number_of_tags; i++) {
f(NMTUtil::index_to_tag(i), file->_summary.by_type(NMTUtil::index_to_tag(i)));
f(NMTUtil::index_to_tag(i), file->_summary.by_tag(NMTUtil::index_to_tag(i)));
}
}
}

View File

@@ -29,8 +29,8 @@ STATIC_ASSERT(NMT_off > NMT_unknown);
STATIC_ASSERT(NMT_summary > NMT_off);
STATIC_ASSERT(NMT_detail > NMT_summary);
#define MEMORY_TAG_DECLARE_NAME(type, human_readable) \
{ #type, human_readable },
#define MEMORY_TAG_DECLARE_NAME(tag, human_readable) \
{ #tag, human_readable },
NMTUtil::S NMTUtil::_strings[] = {
MEMORY_TAG_DO(MEMORY_TAG_DECLARE_NAME)

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -88,7 +88,7 @@ class NMTUtil : AllStatic {
// Map memory tag to index
static inline int tag_to_index(MemTag mem_tag) {
assert(tag_is_valid(mem_tag), "Invalid type (%u)", (unsigned)mem_tag);
assert(tag_is_valid(mem_tag), "Invalid tag (%u)", (unsigned)mem_tag);
return static_cast<int>(mem_tag);
}
@@ -104,7 +104,7 @@ class NMTUtil : AllStatic {
// Map an index to memory tag
static MemTag index_to_tag(int index) {
assert(tag_index_is_valid(index), "Invalid type index (%d)", index);
assert(tag_index_is_valid(index), "Invalid tag index (%d)", index);
return static_cast<MemTag>(index);
}

View File

@@ -60,7 +60,7 @@ void NMTUsage::update_malloc_usage() {
size_t total_arena_size = 0;
for (int i = 0; i < mt_number_of_tags; i++) {
MemTag mem_tag = NMTUtil::index_to_tag(i);
const MallocMemory* mm = ms->by_type(mem_tag);
const MallocMemory* mm = ms->by_tag(mem_tag);
_malloc_by_type[i] = mm->malloc_size() + mm->arena_size();
total_arena_size += mm->arena_size();
}
@@ -84,7 +84,7 @@ void NMTUsage::update_vm_usage() {
_vm_total.reserved = 0;
for (int i = 0; i < mt_number_of_tags; i++) {
MemTag mem_tag = NMTUtil::index_to_tag(i);
const VirtualMemory* vm = vms->by_type(mem_tag);
const VirtualMemory* vm = vms->by_tag(mem_tag);
_vm_by_type[i].reserved = vm->reserved();
_vm_by_type[i].committed = vm->committed();

View File

@@ -413,7 +413,7 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
}
}
void VirtualMemoryTracker::set_reserved_region_type(address addr, MemTag mem_tag) {
void VirtualMemoryTracker::set_reserved_region_tag(address addr, MemTag mem_tag) {
assert(addr != nullptr, "Invalid address");
assert(_reserved_regions != nullptr, "Sanity check");
MemTracker::assert_locked();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,12 +98,12 @@ class VirtualMemorySnapshot : public ResourceObj {
VirtualMemory _virtual_memory[mt_number_of_tags];
public:
inline VirtualMemory* by_type(MemTag mem_tag) {
inline VirtualMemory* by_tag(MemTag mem_tag) {
int index = NMTUtil::tag_to_index(mem_tag);
return &_virtual_memory[index];
}
inline const VirtualMemory* by_type(MemTag mem_tag) const {
inline const VirtualMemory* by_tag(MemTag mem_tag) const {
int index = NMTUtil::tag_to_index(mem_tag);
return &_virtual_memory[index];
}
@@ -135,33 +135,33 @@ class VirtualMemorySummary : AllStatic {
public:
static inline void record_reserved_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->reserve_memory(size);
as_snapshot()->by_tag(mem_tag)->reserve_memory(size);
}
static inline void record_committed_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->commit_memory(size);
as_snapshot()->by_tag(mem_tag)->commit_memory(size);
}
static inline void record_uncommitted_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->uncommit_memory(size);
as_snapshot()->by_tag(mem_tag)->uncommit_memory(size);
}
static inline void record_released_memory(size_t size, MemTag mem_tag) {
as_snapshot()->by_type(mem_tag)->release_memory(size);
as_snapshot()->by_tag(mem_tag)->release_memory(size);
}
// Move virtual memory from one memory tag to another.
// Virtual memory can be reserved before it is associated with a memory tag, and tagged
// as 'unknown'. Once the memory is tagged, the virtual memory will be moved from 'unknown'
// type to specified memory tag.
// tag to specified memory tag.
static inline void move_reserved_memory(MemTag from, MemTag to, size_t size) {
as_snapshot()->by_type(from)->release_memory(size);
as_snapshot()->by_type(to)->reserve_memory(size);
as_snapshot()->by_tag(from)->release_memory(size);
as_snapshot()->by_tag(to)->reserve_memory(size);
}
static inline void move_committed_memory(MemTag from, MemTag to, size_t size) {
as_snapshot()->by_type(from)->uncommit_memory(size);
as_snapshot()->by_type(to)->commit_memory(size);
as_snapshot()->by_tag(from)->uncommit_memory(size);
as_snapshot()->by_tag(to)->commit_memory(size);
}
static void snapshot(VirtualMemorySnapshot* s);
@@ -386,7 +386,7 @@ class VirtualMemoryTracker : AllStatic {
static bool remove_uncommitted_region (address base_addr, size_t size);
static bool remove_released_region (address base_addr, size_t size);
static bool remove_released_region (ReservedMemoryRegion* rgn);
static void set_reserved_region_type (address addr, MemTag mem_tag);
static void set_reserved_region_tag (address addr, MemTag mem_tag);
// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2024, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -96,7 +96,7 @@ private:
public:
IntervalState() : type_tag{0,0}, sidx() {}
IntervalState(const StateType type, const RegionData data) {
assert(!(type == StateType::Released) || data.mem_tag == mtNone, "Released type must have memory tag mtNone");
assert(!(type == StateType::Released) || data.mem_tag == mtNone, "Released state-type must have memory tag mtNone");
type_tag[0] = static_cast<uint8_t>(type);
type_tag[1] = static_cast<uint8_t>(data.mem_tag);
sidx = data.stack_idx;

View File

@@ -33,17 +33,17 @@ public:
MemoryFileTracker tracker(false);
MemoryFileTracker::MemoryFile* file = tracker.make_file("test");
tracker.allocate_memory(file, 0, 100, CALLER_PC, mtTest);
EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(100));
EXPECT_EQ(file->_summary.by_tag(mtTest)->committed(), sz(100));
tracker.allocate_memory(file, 100, 100, CALLER_PC, mtTest);
EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(200));
EXPECT_EQ(file->_summary.by_tag(mtTest)->committed(), sz(200));
tracker.allocate_memory(file, 200, 100, CALLER_PC, mtTest);
EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(300));
EXPECT_EQ(file->_summary.by_tag(mtTest)->committed(), sz(300));
tracker.free_memory(file, 0, 300);
EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(0));
EXPECT_EQ(file->_summary.by_tag(mtTest)->committed(), sz(0));
tracker.allocate_memory(file, 0, 100, CALLER_PC, mtTest);
EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(100));
EXPECT_EQ(file->_summary.by_tag(mtTest)->committed(), sz(100));
tracker.free_memory(file, 50, 10);
EXPECT_EQ(file->_summary.by_type(mtTest)->committed(), sz(90));
EXPECT_EQ(file->_summary.by_tag(mtTest)->committed(), sz(90));
};
};