Merge remote-tracking branch 'origin/master' into JDK-8366659-OM-wait-suspend-deadlock

commit 9356cd9fa8
Author: Anton Artemov
Date: 2025-11-20 10:39:04 +00:00
551 changed files with 23223 additions and 10868 deletions

.gitignore

@@ -26,3 +26,8 @@ NashornProfile.txt
*.rej
*.orig
test/benchmarks/**/target
/src/hotspot/CMakeLists.txt
/src/hotspot/compile_commands.json
/src/hotspot/cmake-build-debug/
/src/hotspot/.cache/
/src/hotspot/.idea/


@@ -668,7 +668,7 @@ update.</p>
(Note that this version is often presented as "MSVC 14.28", and reported
by cl.exe as 19.28.) Older versions will not be accepted by
<code>configure</code> and will not work. The maximum accepted version
of Visual Studio is 2022.</p>
of Visual Studio is 2026.</p>
<p>If you have multiple versions of Visual Studio installed,
<code>configure</code> will by default pick the latest. You can request
a specific version to be used by setting


@@ -468,7 +468,7 @@ available for this update.
The minimum accepted version is Visual Studio 2019 version 16.8. (Note that
this version is often presented as "MSVC 14.28", and reported by cl.exe as
19.28.) Older versions will not be accepted by `configure` and will not work.
The maximum accepted version of Visual Studio is 2022.
The maximum accepted version of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed, `configure` will by
default pick the latest. You can request a specific version to be used by


@@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
VALID_VS_VERSIONS="2022 2019"
VALID_VS_VERSIONS="2022 2019 2026"
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=142
@@ -57,6 +57,21 @@ VS_SDK_PLATFORM_NAME_2022=
VS_SUPPORTED_2022=true
VS_TOOLSET_SUPPORTED_2022=true
VS_DESCRIPTION_2026="Microsoft Visual Studio 2026"
VS_VERSION_INTERNAL_2026=145
VS_MSVCR_2026=vcruntime140.dll
VS_VCRUNTIME_1_2026=vcruntime140_1.dll
VS_MSVCP_2026=msvcp140.dll
VS_ENVVAR_2026="VS180COMNTOOLS"
VS_USE_UCRT_2026="true"
VS_VS_INSTALLDIR_2026="Microsoft Visual Studio/18"
VS_EDITIONS_2026="BuildTools Community Professional Enterprise"
VS_SDK_INSTALLDIR_2026=
VS_VS_PLATFORM_NAME_2026="v145"
VS_SDK_PLATFORM_NAME_2026=
VS_SUPPORTED_2026=true
VS_TOOLSET_SUPPORTED_2026=true
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],


@@ -95,6 +95,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
EXTRA_OBJECT_FILES := $(BUILD_LIBJVM_ALL_OBJS), \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
-DHOTSPOT_GTEST \
-I$(GTEST_FRAMEWORK_SRC)/googletest/include \
-I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
$(addprefix -I, $(GTEST_TEST_SRC)), \


@@ -337,6 +337,30 @@ TARGETS += $(BUILD_LIBJVM)
# for the associated class. If the class doesn't provide a more specific
# declaration (either directly or by inheriting from a class that provides
# one) then the global definition will be used, triggering this check.
#
# The HotSpot wrapper for <new> declares as deprecated all the allocation and
# deallocation functions that use the global allocator. But that blocking
# isn't bullet-proof. Some of these functions are implicitly available in
# every translation unit, without the need to include <new>. So even with that
# wrapper we still need this link-time check. The implicitly declared
# functions and their mangled names are - from C++17 6.7.4:
#
# void* operator new(size_t) // _Znwm
# void* operator new(size_t, align_val_t) // _ZnwmSt11align_val_t
#
# void operator delete(void*) noexcept // _ZdlPv
# void operator delete(void*, size_t) noexcept // _ZdlPvm
# void operator delete(void*, align_val_t) noexcept // _ZdlPvSt11align_val_t
# void operator delete(void*, size_t, align_val_t) noexcept // _ZdlPvmSt11align_val_t
#
# void* operator new[](size_t) // _Znam
# void* operator new[](size_t, align_val_t) // _ZnamSt11align_val_t
#
# void operator delete[](void*) noexcept // _ZdaPv
# void operator delete[](void*, size_t) noexcept // _ZdaPvm
# void operator delete[](void*, align_val_t) noexcept // _ZdaPvSt11align_val_t
# void operator delete[](void*, size_t, align_val_t) noexcept // _ZdaPvmSt11align_val_t
ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
@@ -347,10 +371,18 @@ ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
# so use mangled names when looking for symbols.
# Save the demangling for when something is actually found.
MANGLED_SYMS := \
_ZdaPv \
_ZdlPv \
_Znam \
_Znwm \
_ZnwmSt11align_val_t \
_ZdlPv \
_ZdlPvm \
_ZdlPvSt11align_val_t \
_ZdlPvmSt11align_val_t \
_Znam \
_ZnamSt11align_val_t \
_ZdaPv \
_ZdaPvm \
_ZdaPvSt11align_val_t \
_ZdaPvmSt11align_val_t \
#
UNDEF_PATTERN := ' U '
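
As an illustration of why the link-time check remains necessary, here is a minimal, hypothetical C++ translation unit (not part of this change; file name and flags assumed) that reaches several of these functions without including <new>; running nm -u on the resulting object file should list their mangled names among the ' U ' (undefined) entries that UNDEF_PATTERN matches:

  // sketch.cpp (hypothetical): note there is no #include <new> anywhere.
  // Compiled as C++17, the expressions below call the implicitly declared
  // global allocation/deallocation functions; exactly which sized/aligned
  // variants get chosen depends on the compiler and its flags.
  struct alignas(64) Padded { char buf[64]; };  // over-aligned type

  int main() {
    Padded* p = new Padded;  // operator new(size_t, align_val_t): _ZnwmSt11align_val_t
    delete p;                // an aligned delete: _ZdlPvSt11align_val_t or _ZdlPvmSt11align_val_t
    char* a = new char[16];  // operator new[](size_t): _Znam
    delete[] a;              // operator delete[]: _ZdaPv or _ZdaPvm
    return 0;
  }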


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@ public final class SealedGraph implements Taglet {
// This implies the module is always the same.
private String relativeLink(TypeElement node) {
var util = SealedGraph.this.docletEnvironment.getElementUtils();
var rootPackage = util.getPackageOf(rootNode);
var nodePackage = util.getPackageOf(node);
var backNavigator = rootPackage.getQualifiedName().toString().chars()
// Note: SVG files for nested types use the simple names of containing types as parent directories.
// We therefore need to convert all dots in the qualified name to "../" below.
var backNavigator = rootNode.getQualifiedName().toString().chars()
.filter(c -> c == '.')
.mapToObj(c -> "../")
.collect(joining()) +
"../";
.collect(joining());
var forwardNavigator = nodePackage.getQualifiedName().toString()
.replace(".", "/");
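
For example (hypothetical type): a root type with the qualified name com.example.Outer.Inner contributes one "../" per dot, giving "../../../", which climbs from the SVG's directory com/example/Outer/ back to the documentation root.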


@@ -84,6 +84,7 @@ public interface MessageType {
FILE_OBJECT("file object", "JavaFileObject", "javax.tools"),
PATH("path", "Path", "java.nio.file"),
NAME("name", "Name", "com.sun.tools.javac.util"),
LONG("long", "long", null),
NUMBER("number", "int", null),
OPTION_NAME("option name", "Option", "com.sun.tools.javac.main"),
PROFILE("profile", "Profile", "com.sun.tools.javac.jvm"),


@@ -80,6 +80,7 @@ else
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libJNIAttachMutator := -pthread
BUILD_JDK_JTREG_EXCLUDE += exerevokeall.c
ifeq ($(call isTargetOs, linux), true)
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exelauncher := -ldl


@@ -879,7 +879,6 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
ShouldNotReachHere();
}
OrderAccess::fence();
ICache::invalidate_word((address)patch_addr);
}


@@ -1375,7 +1375,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldr(r10, Address(rmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ lea(rscratch2, unsatisfied);
__ ldr(rscratch2, rscratch2);
__ cmp(r10, rscratch2);
__ br(Assembler::NE, L);
__ call_VM(noreg,


@@ -6328,36 +6328,8 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).
// Special prefetch versions which use the dcbz instruction.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($src$$Register, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle == 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
size(4);
ins_encode %{
__ dcbz($mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@@ -6370,7 +6342,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}


@@ -1146,9 +1146,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label L;
__ ld(x28, Address(xmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ la(t, unsatisfied);
__ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8
__ la(t1, unsatisfied);
__ bne(x28, t1, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,


@@ -612,7 +612,6 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
*
* cpu affinity
* cgroup cpu quota & cpu period
* cgroup cpu shares
*
* Algorithm:
*
@@ -623,19 +622,18 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
*
* All results of division are rounded up to the next whole number.
*
* If quotas have not been specified, return the
* number of active processors in the system.
* If quotas have not been specified, sets the result reference to
* the number of active processors in the system.
*
* If quotas have been specified, the resulting number
* returned will never exceed the number of active processors.
* If quotas have been specified, the number set in the result
* reference will never exceed the number of active processors.
*
* return:
* number of CPUs
* true if there were no errors. false otherwise.
*/
int CgroupSubsystem::active_processor_count() {
int quota_count = 0;
bool CgroupSubsystem::active_processor_count(int& value) {
int cpu_count;
int result;
int result = -1;
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
@@ -643,38 +641,50 @@ int CgroupSubsystem::active_processor_count() {
CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
CachedMetric* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
int val = (int)cpu_limit->value();
log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", val);
return val;
value = (int)cpu_limit->value();
log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
return true;
}
cpu_count = os::Linux::active_processor_count();
result = CgroupUtil::processor_count(contrl->controller(), cpu_count);
if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
return false;
}
assert(result > 0 && result <= cpu_count, "must be");
// Update cached metric to avoid re-reading container settings too often
cpu_limit->set_value(result, OSCONTAINER_CACHE_TIMEOUT);
value = result;
return result;
return true;
}
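
A hypothetical caller sketch of the new convention (the subsystem pointer and the fallback policy are assumptions for illustration); the boolean reports whether the read succeeded, and the count comes back through the reference:

  int cpus = 0;
  if (!cgroup_subsystem->active_processor_count(cpus)) {
    // cgroup interface files could not be read; fall back to the host count
    cpus = os::Linux::active_processor_count();
  }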
/* memory_limit_in_bytes
*
* Return the limit of available memory for this process.
* Return the limit of available memory for this process in the provided
* physical_memory_size_type reference. If there was no limit value set in the underlying
* interface files, 'value_unlimited' is returned.
*
* return:
* memory limit in bytes or
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
* false if retrieving the value failed
* true if retrieving the value was successful and the value was
* set in the 'value' reference.
*/
jlong CgroupSubsystem::memory_limit_in_bytes(julong upper_bound) {
bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
CachedMetric* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
return memory_limit->value();
value = memory_limit->value();
return true;
}
physical_memory_size_type mem_limit = 0;
if (!contrl->controller()->read_memory_limit_in_bytes(upper_bound, mem_limit)) {
return false;
}
jlong mem_limit = contrl->controller()->read_memory_limit_in_bytes(upper_bound);
// Update cached metric to avoid re-reading container settings too often
memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT);
return mem_limit;
value = mem_limit;
return true;
}
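
A hypothetical caller sketch, assuming a 'subsystem' pointer and a 'phys_mem' upper bound; 'value_unlimited' takes over the role of the old -1 return, and a false result replaces OSCONTAINER_ERROR:

  physical_memory_size_type limit = value_unlimited;
  if (!subsystem->memory_limit_in_bytes(phys_mem, limit)) {
    // reading the interface files failed
  } else if (limit != value_unlimited) {
    // a limit below phys_mem is configured for this cgroup
  }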
bool CgroupController::read_string(const char* filename, char* buf, size_t buf_size) {
@@ -719,36 +729,35 @@ bool CgroupController::read_string(const char* filename, char* buf, size_t buf_s
return true;
}
bool CgroupController::read_number(const char* filename, julong* result) {
bool CgroupController::read_number(const char* filename, uint64_t& result) {
char buf[1024];
bool is_ok = read_string(filename, buf, 1024);
if (!is_ok) {
return false;
}
int matched = sscanf(buf, JULONG_FORMAT, result);
int matched = sscanf(buf, UINT64_FORMAT, &result);
if (matched == 1) {
return true;
}
return false;
}
bool CgroupController::read_number_handle_max(const char* filename, jlong* result) {
bool CgroupController::read_number_handle_max(const char* filename, uint64_t& result) {
char buf[1024];
bool is_ok = read_string(filename, buf, 1024);
if (!is_ok) {
return false;
}
jlong val = limit_from_str(buf);
if (val == OSCONTAINER_ERROR) {
uint64_t val = 0;
if (!limit_from_str(buf, val)) {
return false;
}
*result = val;
result = val;
return true;
}
bool CgroupController::read_numerical_key_value(const char* filename, const char* key, julong* result) {
bool CgroupController::read_numerical_key_value(const char* filename, const char* key, uint64_t& result) {
assert(key != nullptr, "key must be given");
assert(result != nullptr, "result pointer must not be null");
assert(filename != nullptr, "file to search in must be given");
const char* s_path = subsystem_path();
if (s_path == nullptr) {
@@ -786,7 +795,7 @@ bool CgroupController::read_numerical_key_value(const char* filename, const char
&& after_key != '\n') {
// Skip key, skip space
const char* value_substr = line + key_len + 1;
int matched = sscanf(value_substr, JULONG_FORMAT, result);
int matched = sscanf(value_substr, UINT64_FORMAT, &result);
found_match = matched == 1;
if (found_match) {
break;
@@ -797,12 +806,12 @@ bool CgroupController::read_numerical_key_value(const char* filename, const char
if (found_match) {
return true;
}
log_debug(os, container)("Type %s (key == %s) not found in file %s", JULONG_FORMAT,
log_debug(os, container)("Type %s (key == %s) not found in file %s", UINT64_FORMAT,
key, absolute_path);
return false;
}
bool CgroupController::read_numerical_tuple_value(const char* filename, bool use_first, jlong* result) {
bool CgroupController::read_numerical_tuple_value(const char* filename, bool use_first, uint64_t& result) {
char buf[1024];
bool is_ok = read_string(filename, buf, 1024);
if (!is_ok) {
@@ -813,80 +822,90 @@ bool CgroupController::read_numerical_tuple_value(const char* filename, bool use
if (matched != 1) {
return false;
}
jlong val = limit_from_str(token);
if (val == OSCONTAINER_ERROR) {
uint64_t val = 0;
if (!limit_from_str(token, val)) {
return false;
}
*result = val;
result = val;
return true;
}
jlong CgroupController::limit_from_str(char* limit_str) {
bool CgroupController::limit_from_str(char* limit_str, uint64_t& value) {
if (limit_str == nullptr) {
return OSCONTAINER_ERROR;
return false;
}
// Unlimited memory in cgroups is the literal string 'max' for
// some controllers, for example the pids controller.
if (strcmp("max", limit_str) == 0) {
return (jlong)-1;
value = value_unlimited;
return true;
}
julong limit;
if (sscanf(limit_str, JULONG_FORMAT, &limit) != 1) {
return OSCONTAINER_ERROR;
uint64_t limit;
if (sscanf(limit_str, UINT64_FORMAT, &limit) != 1) {
return false;
}
return (jlong)limit;
value = limit;
return true;
}
// CgroupSubsystem implementations
jlong CgroupSubsystem::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
bool CgroupSubsystem::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& value) {
return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound,
upper_swap_bound,
value);
}
jlong CgroupSubsystem::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound, upper_swap_bound);
bool CgroupSubsystem::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& value) {
return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound,
upper_swap_bound,
value);
}
jlong CgroupSubsystem::memory_soft_limit_in_bytes(julong upper_bound) {
return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound);
bool CgroupSubsystem::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound, value);
}
jlong CgroupSubsystem::memory_throttle_limit_in_bytes() {
return memory_controller()->controller()->memory_throttle_limit_in_bytes();
bool CgroupSubsystem::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
return memory_controller()->controller()->memory_throttle_limit_in_bytes(value);
}
jlong CgroupSubsystem::memory_usage_in_bytes() {
return memory_controller()->controller()->memory_usage_in_bytes();
bool CgroupSubsystem::memory_usage_in_bytes(physical_memory_size_type& value) {
return memory_controller()->controller()->memory_usage_in_bytes(value);
}
jlong CgroupSubsystem::memory_max_usage_in_bytes() {
return memory_controller()->controller()->memory_max_usage_in_bytes();
bool CgroupSubsystem::memory_max_usage_in_bytes(physical_memory_size_type& value) {
return memory_controller()->controller()->memory_max_usage_in_bytes(value);
}
jlong CgroupSubsystem::rss_usage_in_bytes() {
return memory_controller()->controller()->rss_usage_in_bytes();
bool CgroupSubsystem::rss_usage_in_bytes(physical_memory_size_type& value) {
return memory_controller()->controller()->rss_usage_in_bytes(value);
}
jlong CgroupSubsystem::cache_usage_in_bytes() {
return memory_controller()->controller()->cache_usage_in_bytes();
bool CgroupSubsystem::cache_usage_in_bytes(physical_memory_size_type& value) {
return memory_controller()->controller()->cache_usage_in_bytes(value);
}
int CgroupSubsystem::cpu_quota() {
return cpu_controller()->controller()->cpu_quota();
bool CgroupSubsystem::cpu_quota(int& value) {
return cpu_controller()->controller()->cpu_quota(value);
}
int CgroupSubsystem::cpu_period() {
return cpu_controller()->controller()->cpu_period();
bool CgroupSubsystem::cpu_period(int& value) {
return cpu_controller()->controller()->cpu_period(value);
}
int CgroupSubsystem::cpu_shares() {
return cpu_controller()->controller()->cpu_shares();
bool CgroupSubsystem::cpu_shares(int& value) {
return cpu_controller()->controller()->cpu_shares(value);
}
jlong CgroupSubsystem::cpu_usage_in_micros() {
return cpuacct_controller()->cpu_usage_in_micros();
bool CgroupSubsystem::cpu_usage_in_micros(uint64_t& value) {
return cpuacct_controller()->cpu_usage_in_micros(value);
}
void CgroupSubsystem::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
void CgroupSubsystem::print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) {
memory_controller()->controller()->print_version_specific_info(st, upper_mem_bound);
}


@@ -72,23 +72,29 @@
#define CONTAINER_READ_NUMBER_CHECKED(controller, filename, log_string, retval) \
{ \
bool is_ok; \
is_ok = controller->read_number(filename, &retval); \
is_ok = controller->read_number(filename, retval); \
if (!is_ok) { \
log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
return OSCONTAINER_ERROR; \
log_trace(os, container)(log_string " failed"); \
return false; \
} \
log_trace(os, container)(log_string " is: " JULONG_FORMAT, retval); \
log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval); \
return true; \
}
#define CONTAINER_READ_NUMBER_CHECKED_MAX(controller, filename, log_string, retval) \
{ \
bool is_ok; \
is_ok = controller->read_number_handle_max(filename, &retval); \
is_ok = controller->read_number_handle_max(filename, retval); \
if (!is_ok) { \
log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
return OSCONTAINER_ERROR; \
log_trace(os, container)(log_string " failed"); \
return false; \
} \
log_trace(os, container)(log_string " is: " JLONG_FORMAT, retval); \
if (retval == value_unlimited) { \
log_trace(os, container)(log_string " is: unlimited"); \
} else { \
log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval); \
} \
return true; \
}
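
Note that these macros expand to a complete function body, including both return statements, so readers are written as one-line wrappers; a hypothetical example (controller name assumed) mirroring the pattern used later in this change:

  bool MyMemoryController::read_limit_val(uint64_t& result) {
    CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", result);
  }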
#define CONTAINER_READ_STRING_CHECKED(controller, filename, log_string, retval, buf_size) \
@@ -96,7 +102,7 @@
bool is_ok; \
is_ok = controller->read_string(filename, retval, buf_size); \
if (!is_ok) { \
log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
log_trace(os, container)(log_string " failed"); \
return nullptr; \
} \
log_trace(os, container)(log_string " is: %s", retval); \
@@ -105,12 +111,13 @@
#define CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(controller, filename, key, log_string, retval) \
{ \
bool is_ok; \
is_ok = controller->read_numerical_key_value(filename, key, &retval); \
is_ok = controller->read_numerical_key_value(filename, key, retval); \
if (!is_ok) { \
log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
return OSCONTAINER_ERROR; \
log_trace(os, container)(log_string " failed"); \
return false; \
} \
log_trace(os, container)(log_string " is: " JULONG_FORMAT, retval); \
log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval); \
return true; \
}
class CgroupController: public CHeapObj<mtInternal> {
@@ -124,21 +131,22 @@ class CgroupController: public CHeapObj<mtInternal> {
const char* mount_point() { return _mount_point; }
virtual bool needs_hierarchy_adjustment() { return false; }
/* Read a numerical value as unsigned long
/* Read a numerical value as uint64_t
*
* returns: false if any error occurred. true otherwise and
* the parsed value is set in the provided julong pointer.
* the parsed value is set in the provided result reference.
*/
bool read_number(const char* filename, julong* result);
bool read_number(const char* filename, uint64_t& result);
/* Convenience method to deal with numbers as well as the string 'max'
* in interface files. Otherwise same as read_number().
*
* returns: false if any error occurred. true otherwise and
* the parsed value (which might be negative) is being set in
* the provided jlong pointer.
* the parsed value will be set in the provided result reference.
* When the value is the string 'max', 'value_unlimited' is set
* as the value.
*/
bool read_number_handle_max(const char* filename, jlong* result);
bool read_number_handle_max(const char* filename, uint64_t& result);
/* Read a string of at most buf_size - 1 characters from the interface file.
* The provided buffer must be at least buf_size in size so as to account
@@ -156,37 +164,37 @@ class CgroupController: public CHeapObj<mtInternal> {
* parsing interface files like cpu.max which contain such tuples.
*
* returns: false if any error occurred. true otherwise and the parsed
* value of the appropriate tuple entry set in the provided jlong pointer.
* value of the appropriate tuple entry set in the provided result reference.
*/
bool read_numerical_tuple_value(const char* filename, bool use_first, jlong* result);
bool read_numerical_tuple_value(const char* filename, bool use_first, uint64_t& result);
/* Read a numerical value from a multi-line interface file. The matched line is
* determined by the provided 'key'. The associated numerical value is being set
* via the passed in julong pointer. Example interface file 'memory.stat'
* via the passed in result reference. Example interface file 'memory.stat'
*
* returns: false if any error occurred. true otherwise and the parsed value is
* being set in the provided julong pointer.
* being set in the provided result reference.
*/
bool read_numerical_key_value(const char* filename, const char* key, julong* result);
bool read_numerical_key_value(const char* filename, const char* key, uint64_t& result);
private:
static jlong limit_from_str(char* limit_str);
static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
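
A hypothetical usage sketch of the 'max'-aware reader ('ctrl' is assumed); pids.max is one interface file that can hold the literal 'max':

  uint64_t pids_limit = 0;
  if (ctrl->read_number_handle_max("/pids.max", pids_limit)) {
    bool unlimited = (pids_limit == value_unlimited);  // file contained 'max'
  }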
class CachedMetric : public CHeapObj<mtInternal>{
private:
volatile jlong _metric;
volatile physical_memory_size_type _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
_metric = -1;
_metric = value_unlimited;
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
jlong value() { return _metric; }
void set_value(jlong value, jlong timeout) {
physical_memory_size_type value() { return _metric; }
void set_value(physical_memory_size_type value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@@ -216,9 +224,9 @@ class CachingCgroupController : public CHeapObj<mtInternal> {
// Pure virtual class representing version agnostic CPU controllers
class CgroupCpuController: public CHeapObj<mtInternal> {
public:
virtual int cpu_quota() = 0;
virtual int cpu_period() = 0;
virtual int cpu_shares() = 0;
virtual bool cpu_quota(int& value) = 0;
virtual bool cpu_period(int& value) = 0;
virtual bool cpu_shares(int& value) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@@ -230,7 +238,7 @@ class CgroupCpuController: public CHeapObj<mtInternal> {
// Pure virtual class representing version agnostic CPU accounting controllers
class CgroupCpuacctController: public CHeapObj<mtInternal> {
public:
virtual jlong cpu_usage_in_micros() = 0;
virtual bool cpu_usage_in_micros(uint64_t& value) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@@ -242,16 +250,22 @@ class CgroupCpuacctController: public CHeapObj<mtInternal> {
// Pure virtual class representing version agnostic memory controllers
class CgroupMemoryController: public CHeapObj<mtInternal> {
public:
virtual jlong read_memory_limit_in_bytes(julong upper_bound) = 0;
virtual jlong memory_usage_in_bytes() = 0;
virtual jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
virtual jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
virtual jlong memory_soft_limit_in_bytes(julong upper_bound) = 0;
virtual jlong memory_throttle_limit_in_bytes() = 0;
virtual jlong memory_max_usage_in_bytes() = 0;
virtual jlong rss_usage_in_bytes() = 0;
virtual jlong cache_usage_in_bytes() = 0;
virtual void print_version_specific_info(outputStream* st, julong upper_mem_bound) = 0;
virtual bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) = 0;
virtual bool memory_usage_in_bytes(physical_memory_size_type& value) = 0;
virtual bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& value) = 0;
virtual bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& value) = 0;
virtual bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) = 0;
virtual bool memory_throttle_limit_in_bytes(physical_memory_size_type& value) = 0;
virtual bool memory_max_usage_in_bytes(physical_memory_size_type& value) = 0;
virtual bool rss_usage_in_bytes(physical_memory_size_type& value) = 0;
virtual bool cache_usage_in_bytes(physical_memory_size_type& value) = 0;
virtual void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@@ -262,11 +276,11 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
jlong memory_limit_in_bytes(julong upper_bound);
int active_processor_count();
bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
bool active_processor_count(int& value);
virtual jlong pids_max() = 0;
virtual jlong pids_current() = 0;
virtual bool pids_max(uint64_t& value) = 0;
virtual bool pids_current(uint64_t& value) = 0;
virtual bool is_containerized() = 0;
virtual char * cpu_cpuset_cpus() = 0;
@@ -276,21 +290,26 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
int cpu_quota();
int cpu_period();
int cpu_shares();
bool cpu_quota(int& value);
bool cpu_period(int& value);
bool cpu_shares(int& value);
jlong cpu_usage_in_micros();
bool cpu_usage_in_micros(uint64_t& value);
jlong memory_usage_in_bytes();
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
jlong memory_soft_limit_in_bytes(julong upper_bound);
jlong memory_throttle_limit_in_bytes();
jlong memory_max_usage_in_bytes();
jlong rss_usage_in_bytes();
jlong cache_usage_in_bytes();
void print_version_specific_info(outputStream* st, julong upper_mem_bound);
bool memory_usage_in_bytes(physical_memory_size_type& value);
bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& value);
bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& value);
bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value);
bool memory_throttle_limit_in_bytes(physical_memory_size_type& value);
bool memory_max_usage_in_bytes(physical_memory_size_type& value);
bool rss_usage_in_bytes(physical_memory_size_type& value);
bool cache_usage_in_bytes(physical_memory_size_type& value);
void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound);
};
// Utility class for storing info retrieved from /proc/cgroups,


@@ -25,13 +25,19 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
int CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int host_cpus) {
assert(host_cpus > 0, "physical host cpus must be positive");
int limit_count = host_cpus;
int quota = cpu_ctrl->cpu_quota();
int period = cpu_ctrl->cpu_period();
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
assert(upper_bound > 0, "upper bound of cpus must be positive");
int limit_count = upper_bound;
int quota = -1;
int period = -1;
if (!cpu_ctrl->cpu_quota(quota)) {
return false;
}
if (!cpu_ctrl->cpu_period(period)) {
return false;
}
int quota_count = 0;
int result = 0;
int result = upper_bound;
if (quota > -1 && period > 0) {
quota_count = ceilf((float)quota / (float)period);
@@ -43,16 +49,50 @@ int CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int host_cpus) {
limit_count = quota_count;
}
result = MIN2(host_cpus, limit_count);
result = MIN2(upper_bound, limit_count);
log_trace(os, container)("OSContainer::active_processor_count: %d", result);
return result;
value = result;
return true;
}
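
A worked example with hypothetical numbers: quota = 150000 and period = 100000 (a 1.5-CPU allowance) on a host with upper_bound = 8 gives

  quota_count = ceilf(150000.0f / 100000.0f) = 2
  value       = MIN2(8, 2)                   = 2

so fractional quotas round up to the next whole processor, as the algorithm description above requires.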
// Get an updated memory limit. The return value is less than or equal to the
// passed in 'lowest' value.
physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryController* mem,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound) {
assert(lowest <= upper_bound, "invariant");
physical_memory_size_type current_limit = value_unlimited;
if (mem->read_memory_limit_in_bytes(upper_bound, current_limit) && current_limit != value_unlimited) {
assert(current_limit <= upper_bound, "invariant");
if (lowest > current_limit) {
return current_limit;
}
}
return lowest;
}
// Get an updated cpu limit. The return value is less than or equal to the
// passed in 'lowest' value.
int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
int lowest,
int upper_bound) {
assert(lowest > 0 && lowest <= upper_bound, "invariant");
int cpu_limit_val = -1;
if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
assert(cpu_limit_val <= upper_bound, "invariant");
if (lowest > cpu_limit_val) {
return cpu_limit_val;
}
}
return lowest;
}
void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
assert(mem->cgroup_path() != nullptr, "invariant");
if (strstr(mem->cgroup_path(), "../") != nullptr) {
log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved to '%s', detected limits won't be accurate",
mem->mount_point(), mem->cgroup_path());
log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved "
"to '%s'. Detected limits won't be accurate",
mem->mount_point(), mem->cgroup_path());
mem->set_subsystem_path("/");
return;
}
@@ -65,17 +105,18 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
char* cg_path = os::strdup(orig);
char* last_slash;
assert(cg_path[0] == '/', "cgroup path must start with '/'");
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
physical_memory_size_type phys_mem = os::Linux::physical_memory();
char* limit_cg_path = nullptr;
jlong limit = mem->read_memory_limit_in_bytes(phys_mem);
jlong lowest_limit = limit < 0 ? phys_mem : limit;
julong orig_limit = ((julong)lowest_limit) != phys_mem ? lowest_limit : phys_mem;
physical_memory_size_type limit = value_unlimited;
physical_memory_size_type lowest_limit = phys_mem;
lowest_limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
physical_memory_size_type orig_limit = lowest_limit != phys_mem ? lowest_limit : phys_mem;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
*last_slash = '\0'; // strip path
// update to shortened path and try again
mem->set_subsystem_path(cg_path);
limit = mem->read_memory_limit_in_bytes(phys_mem);
if (limit >= 0 && limit < lowest_limit) {
limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
if (limit < lowest_limit) {
lowest_limit = limit;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup(cg_path);
@@ -83,24 +124,24 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
}
// need to check limit at mount point
mem->set_subsystem_path("/");
limit = mem->read_memory_limit_in_bytes(phys_mem);
if (limit >= 0 && limit < lowest_limit) {
limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
if (limit < lowest_limit) {
lowest_limit = limit;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup("/");
}
assert(lowest_limit >= 0, "limit must be positive");
if ((julong)lowest_limit != orig_limit) {
assert(lowest_limit <= phys_mem, "limit must not exceed host memory");
if (lowest_limit != orig_limit) {
// we've found a lower limit anywhere in the hierarchy,
// set the path to the limit path
assert(limit_cg_path != nullptr, "limit path must be set");
mem->set_subsystem_path(limit_cg_path);
log_trace(os, container)("Adjusted controller path for memory to: %s. "
"Lowest limit was: " JLONG_FORMAT,
"Lowest limit was: " PHYS_MEM_TYPE_FORMAT,
mem->subsystem_path(),
lowest_limit);
} else {
log_trace(os, container)("Lowest limit was: " JLONG_FORMAT, lowest_limit);
log_trace(os, container)("Lowest limit was: " PHYS_MEM_TYPE_FORMAT, lowest_limit);
log_trace(os, container)("No lower limit found for memory in hierarchy %s, "
"adjusting to original path %s",
mem->mount_point(), orig);
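
A minimal standalone sketch (hypothetical path) of the walk performed by the loop above: each iteration truncates one path component, and the mount point "/" is probed separately afterwards:

  #include <string.h>

  int main() {
    char cg_path[] = "/a/b/c";
    char* last_slash;
    while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
      *last_slash = '\0';  // "/a/b", then "/a"; stops once only the leading '/' remains
      // re-read the memory limit at the shortened path here
    }
    return 0;
  }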
@@ -114,8 +155,9 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cpu->cgroup_path() != nullptr, "invariant");
if (strstr(cpu->cgroup_path(), "../") != nullptr) {
log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved to '%s', detected limits won't be accurate",
cpu->mount_point(), cpu->cgroup_path());
log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved "
"to '%s'. Detected limits won't be accurate",
cpu->mount_point(), cpu->cgroup_path());
cpu->set_subsystem_path("/");
return;
}
@@ -129,15 +171,15 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
char* last_slash;
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
int cpus = CgroupUtil::processor_count(cpu, host_cpus);
int lowest_limit = cpus < host_cpus ? cpus: host_cpus;
int lowest_limit = host_cpus;
int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
*last_slash = '\0'; // strip path
// update to shortened path and try again
cpu->set_subsystem_path(cg_path);
cpus = CgroupUtil::processor_count(cpu, host_cpus);
cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
if (cpus != host_cpus && cpus < lowest_limit) {
lowest_limit = cpus;
os::free(limit_cg_path); // handles nullptr
@@ -146,7 +188,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
}
// need to check limit at mount point
cpu->set_subsystem_path("/");
cpus = CgroupUtil::processor_count(cpu, host_cpus);
cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
if (cpus != host_cpus && cpus < lowest_limit) {
lowest_limit = cpus;
os::free(limit_cg_path); // handles nullptr
@@ -160,8 +202,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
cpu->set_subsystem_path(limit_cg_path);
log_trace(os, container)("Adjusted controller path for cpu to: %s. "
"Lowest limit was: %d",
cpu->subsystem_path(),
lowest_limit);
cpu->subsystem_path(), lowest_limit);
} else {
log_trace(os, container)("Lowest limit was: %d", lowest_limit);
log_trace(os, container)("No lower limit found for cpu in hierarchy %s, "


@@ -31,13 +31,20 @@
class CgroupUtil: AllStatic {
public:
static int processor_count(CgroupCpuController* cpu, int host_cpus);
static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
// Given a cpu controller, adjust its path to a point in the hierarchy
// that represents the closest cpu limit.
static void adjust_controller(CgroupCpuController* c);
private:
static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
physical_memory_size_type lowest,
physical_memory_size_type upper_bound);
static int get_updated_cpu_limit(CgroupCpuController* c,
int lowest,
int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP


@@ -124,10 +124,13 @@ void CgroupV1Controller::set_subsystem_path(const char* cgroup_path) {
}
}
jlong CgroupV1MemoryController::uses_mem_hierarchy() {
julong use_hierarchy;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.use_hierarchy", "Use Hierarchy", use_hierarchy);
return (jlong)use_hierarchy;
bool CgroupV1MemoryController::read_use_hierarchy_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.use_hierarchy", "Use Hierarchy", result);
}
bool CgroupV1MemoryController::uses_mem_hierarchy() {
physical_memory_size_type use_hierarchy = 0;
return read_use_hierarchy_val(use_hierarchy) && use_hierarchy > 0;
}
/*
@@ -141,125 +144,177 @@ bool CgroupV1Controller::needs_hierarchy_adjustment() {
return strcmp(_root, _cgroup_path) != 0;
}
static inline
void verbose_log(julong read_mem_limit, julong upper_mem_bound) {
if (log_is_enabled(Debug, os, container)) {
jlong mem_limit = (jlong)read_mem_limit; // account for negative values
if (mem_limit < 0 || read_mem_limit >= upper_mem_bound) {
const char *reason;
if (mem_limit == OSCONTAINER_ERROR) {
reason = "failed";
} else if (mem_limit == -1) {
reason = "unlimited";
} else {
assert(read_mem_limit >= upper_mem_bound, "Expected read value exceeding upper memory bound");
// Exceeding physical memory is treated as unlimited. This implementation
// caps it at host_mem since Cg v1 has no value to represent 'max'.
reason = "ignored";
}
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
reason, mem_limit, upper_mem_bound);
bool CgroupV1MemoryController::read_memory_limit_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", result);
}
bool CgroupV1MemoryController::read_hierarchical_memory_limit_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
"hierarchical_memory_limit", "Hierarchical Memory Limit",
result);
}
bool CgroupV1MemoryController::read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) {
physical_memory_size_type memlimit = 0;
if (!read_memory_limit_val(memlimit)) {
log_trace(os, container)("container memory limit failed, upper bound is " PHYS_MEM_TYPE_FORMAT, upper_bound);
return false;
}
if (memlimit >= upper_bound) {
physical_memory_size_type hierlimit = 0;
if (uses_mem_hierarchy() && read_hierarchical_memory_limit_val(hierlimit) &&
hierlimit < upper_bound) {
log_trace(os, container)("Memory Limit is: " PHYS_MEM_TYPE_FORMAT, hierlimit);
result = hierlimit;
} else {
// Exceeding physical memory is treated as unlimited. This implementation
// caps it at host_mem since Cg v1 has no value to represent 'max'.
log_trace(os, container)("container memory limit ignored: " PHYS_MEM_TYPE_FORMAT
", upper bound is " PHYS_MEM_TYPE_FORMAT, memlimit, upper_bound);
result = value_unlimited;
}
} else {
result = memlimit;
}
return true;
}
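
Hypothetical outcomes for illustration, assuming an 8 GiB upper bound (8589934592 bytes):

  /memory.limit_in_bytes reads 4294967296     -> result = 4294967296 (a real 4 GiB limit)
  /memory.limit_in_bytes reads >= 8589934592  -> result = value_unlimited, unless memory.stat
                                                 supplies a lower hierarchical_memory_limit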
jlong CgroupV1MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
julong memlimit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", memlimit);
if (memlimit >= upper_bound && uses_mem_hierarchy()) {
CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
"hierarchical_memory_limit", "Hierarchical Memory Limit",
memlimit);
}
verbose_log(memlimit, upper_bound);
return (jlong)((memlimit < upper_bound) ? memlimit : -1);
bool CgroupV1MemoryController::read_mem_swap(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", result);
}
/* read_mem_swap
bool CgroupV1MemoryController::read_hierarchical_mem_swap_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
"hierarchical_memsw_limit", "Hierarchical Memory and Swap Limit",
result);
}
/* memory_and_swap_limit_in_bytes
*
* Determine the memory and swap limit metric. Returns a positive limit value strictly
* lower than the physical memory and swap limit iff there is a limit. Otherwise a
* negative value is returned indicating the determined status.
* Determine the memory and swap limit metric. Sets the 'result' reference to a positive limit value or
* 'value_unlimited' (for unlimited).
*
* returns:
* * A number > 0 if the limit is available and lower than a physical upper bound.
* * OSCONTAINER_ERROR if the limit cannot be retrieved (i.e. not supported) or
* * -1 if there isn't any limit in place (note: includes values which exceed a physical
* upper bound)
* * false if an error occurred. 'result' reference remains unchanged.
* * true if the limit value has been set in the 'result' reference
*/
jlong CgroupV1MemoryController::read_mem_swap(julong upper_memsw_bound) {
julong memswlimit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", memswlimit);
if (memswlimit >= upper_memsw_bound && uses_mem_hierarchy()) {
CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
"hierarchical_memsw_limit", "Hierarchical Memory and Swap Limit",
memswlimit);
bool CgroupV1MemoryController::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) {
physical_memory_size_type total_mem_swap = upper_mem_bound + upper_swap_bound;
physical_memory_size_type memory_swap = 0;
bool mem_swap_read_failed = false;
if (!read_mem_swap(memory_swap)) {
mem_swap_read_failed = true;
}
if (memory_swap >= total_mem_swap) {
physical_memory_size_type hiermswlimit = 0;
if (uses_mem_hierarchy() && read_hierarchical_mem_swap_val(hiermswlimit) &&
hiermswlimit < total_mem_swap) {
log_trace(os, container)("Memory and Swap Limit is: " PHYS_MEM_TYPE_FORMAT, hiermswlimit);
memory_swap = hiermswlimit;
} else {
memory_swap = value_unlimited;
}
}
if (memory_swap == value_unlimited) {
log_trace(os, container)("Memory and Swap Limit is: Unlimited");
result = value_unlimited;
return true;
}
verbose_log(memswlimit, upper_memsw_bound);
return (jlong)((memswlimit < upper_memsw_bound) ? memswlimit : -1);
}
jlong CgroupV1MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
jlong memory_swap = read_mem_swap(upper_mem_bound + upper_swap_bound);
if (memory_swap == -1) {
return memory_swap;
}
// If there is a swap limit, but swappiness == 0, reset the limit
// to the memory limit. Do the same for cases where swap isn't
// supported.
jlong swappiness = read_mem_swappiness();
if (swappiness == 0 || memory_swap == OSCONTAINER_ERROR) {
jlong memlimit = read_memory_limit_in_bytes(upper_mem_bound);
if (memory_swap == OSCONTAINER_ERROR) {
log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swap is not supported", memlimit);
} else {
log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swappiness is 0", memlimit);
}
return memlimit;
physical_memory_size_type swappiness = 0;
if (!read_mem_swappiness(swappiness)) {
// assume no swap
mem_swap_read_failed = true;
}
return memory_swap;
if (swappiness == 0 || mem_swap_read_failed) {
physical_memory_size_type memlimit = value_unlimited;
if (!read_memory_limit_in_bytes(upper_mem_bound, memlimit)) {
return false;
}
if (memlimit == value_unlimited) {
result = value_unlimited; // No memory limit, thus no swap limit
return true;
}
if (mem_swap_read_failed) {
log_trace(os, container)("Memory and Swap Limit has been reset to " PHYS_MEM_TYPE_FORMAT
" because swap is not supported", memlimit);
} else {
log_trace(os, container)("Memory and Swap Limit has been reset to " PHYS_MEM_TYPE_FORMAT
" because swappiness is 0", memlimit);
}
result = memlimit;
return true;
}
result = memory_swap;
return true;
}
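
A summary of the cases the function above distinguishes, with hypothetical readings:

  memsw limit at/above the combined bounds and no
  lower hierarchical limit                          -> result = value_unlimited
  swappiness == 0, or memsw/swappiness unreadable   -> result = the memory limit
                                                       (value_unlimited if none is set)
  otherwise                                         -> result = the memsw limit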
static inline
jlong memory_swap_usage_impl(CgroupController* ctrl) {
julong memory_swap_usage;
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.memsw.usage_in_bytes", "mem swap usage", memory_swap_usage);
return (jlong)memory_swap_usage;
bool memory_swap_usage_impl(CgroupController* ctrl, physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.memsw.usage_in_bytes", "mem swap usage", result);
}
jlong CgroupV1MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
jlong memory_sw_limit = memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
if (memory_sw_limit > 0 && memory_limit > 0) {
jlong delta_swap = memory_sw_limit - memory_limit;
if (delta_swap > 0) {
return memory_swap_usage_impl(reader());
bool CgroupV1MemoryController::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) {
physical_memory_size_type memory_sw_limit = value_unlimited;
if (!memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound, memory_sw_limit)) {
return false;
}
physical_memory_size_type mem_limit_val = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (read_memory_limit_in_bytes(upper_mem_bound, mem_limit_val)) {
if (mem_limit_val != value_unlimited) {
memory_limit = mem_limit_val;
}
}
return memory_usage_in_bytes();
}
jlong CgroupV1MemoryController::read_mem_swappiness() {
julong swappiness;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.swappiness", "Swappiness", swappiness);
return (jlong)swappiness;
}
jlong CgroupV1MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
julong memsoftlimit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", memsoftlimit);
if (memsoftlimit >= upper_bound) {
log_trace(os, container)("Memory Soft Limit is: Unlimited");
return (jlong)-1;
} else {
return (jlong)memsoftlimit;
if (memory_sw_limit != value_unlimited && memory_limit != value_unlimited) {
if (memory_limit < memory_sw_limit) {
// swap allowed and > 0
physical_memory_size_type swap_usage = 0;
if (!memory_swap_usage_impl(reader(), swap_usage)) {
return false;
}
result = swap_usage;
return true;
}
}
return memory_usage_in_bytes(result);
}
jlong CgroupV1MemoryController::memory_throttle_limit_in_bytes() {
bool CgroupV1MemoryController::read_mem_swappiness(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.swappiness", "Swappiness", result);
}
bool CgroupV1MemoryController::memory_soft_limit_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", result);
}
bool CgroupV1MemoryController::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) {
physical_memory_size_type mem_soft_limit = 0;
if (!memory_soft_limit_val(mem_soft_limit)) {
return false;
}
if (mem_soft_limit >= upper_bound) {
log_trace(os, container)("Memory Soft Limit is: Unlimited");
result = value_unlimited;
} else {
result = mem_soft_limit;
}
return true;
}
bool CgroupV1MemoryController::memory_throttle_limit_in_bytes(physical_memory_size_type& result) {
// Log this string at trace level so as to make tests happy.
log_trace(os, container)("Memory Throttle Limit is not supported.");
return OSCONTAINER_ERROR; // not supported
return false;
}
// Constructor
@@ -288,80 +343,129 @@ bool CgroupV1Subsystem::is_containerized() {
_cpuset->is_read_only();
}
/* memory_usage_in_bytes
bool CgroupV1MemoryController::memory_usage_in_bytes(physical_memory_size_type& result) {
physical_memory_size_type memory_usage = 0;
if (!memory_usage_val(memory_usage)) {
return false;
}
result = memory_usage;
return true;
}
/* memory_usage_val
*
* Return the amount of used memory for this process.
* Read the amount of used memory for this process into the passed in reference 'result'
*
* return:
* memory usage in bytes or
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
* true when reading of the file was successful and 'result' was set appropriately
* false when reading of the file failed
*/
jlong CgroupV1MemoryController::memory_usage_in_bytes() {
julong memusage;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.usage_in_bytes", "Memory Usage", memusage);
return (jlong)memusage;
bool CgroupV1MemoryController::memory_usage_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.usage_in_bytes", "Memory Usage", result);
}
bool CgroupV1MemoryController::memory_max_usage_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.max_usage_in_bytes", "Maximum Memory Usage", result);
}
/* memory_max_usage_in_bytes
*
* Return the maximum amount of used memory for this process.
* Return the maximum amount of used memory for this process in the
* result reference.
*
* return:
* max memory usage in bytes or
* OSCONTAINER_ERROR for not supported
* true if the result reference has been set
* false otherwise (e.g. on error)
*/
jlong CgroupV1MemoryController::memory_max_usage_in_bytes() {
julong memmaxusage;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.max_usage_in_bytes", "Maximum Memory Usage", memmaxusage);
return (jlong)memmaxusage;
}
jlong CgroupV1MemoryController::rss_usage_in_bytes() {
julong rss;
bool is_ok = reader()->read_numerical_key_value("/memory.stat", "rss", &rss);
if (!is_ok) {
return OSCONTAINER_ERROR;
bool CgroupV1MemoryController::memory_max_usage_in_bytes(physical_memory_size_type& result) {
physical_memory_size_type memory_max_usage = 0;
if (!memory_max_usage_val(memory_max_usage)) {
return false;
}
log_trace(os, container)("RSS usage is: " JULONG_FORMAT, rss);
return (jlong)rss;
result = memory_max_usage;
return true;
}
jlong CgroupV1MemoryController::cache_usage_in_bytes() {
julong cache;
bool is_ok = reader()->read_numerical_key_value("/memory.stat", "cache", &cache);
if (!is_ok) {
return OSCONTAINER_ERROR;
bool CgroupV1MemoryController::rss_usage_in_bytes(physical_memory_size_type& result) {
physical_memory_size_type rss = 0;
if (!reader()->read_numerical_key_value("/memory.stat", "rss", rss)) {
return false;
}
log_trace(os, container)("Cache usage is: " JULONG_FORMAT, cache);
return cache;
log_trace(os, container)("RSS usage is: " PHYS_MEM_TYPE_FORMAT, rss);
result = rss;
return true;
}
jlong CgroupV1MemoryController::kernel_memory_usage_in_bytes() {
julong kmem_usage;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.usage_in_bytes", "Kernel Memory Usage", kmem_usage);
return (jlong)kmem_usage;
bool CgroupV1MemoryController::cache_usage_in_bytes(physical_memory_size_type& result) {
physical_memory_size_type cache = 0;
if (!reader()->read_numerical_key_value("/memory.stat", "cache", cache)) {
return false;
}
log_trace(os, container)("Cache usage is: " PHYS_MEM_TYPE_FORMAT, cache);
result = cache;
return true;
}
jlong CgroupV1MemoryController::kernel_memory_limit_in_bytes(julong upper_bound) {
julong kmem_limit;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", kmem_limit);
bool CgroupV1MemoryController::kernel_memory_usage_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.usage_in_bytes", "Kernel Memory Usage", result);
}
bool CgroupV1MemoryController::kernel_memory_usage_in_bytes(physical_memory_size_type& result) {
physical_memory_size_type kmem_usage = 0;
if (!kernel_memory_usage_val(kmem_usage)) {
return false;
}
result = kmem_usage;
return true;
}
bool CgroupV1MemoryController::kernel_memory_limit_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", result);
}
bool CgroupV1MemoryController::kernel_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) {
physical_memory_size_type kmem_limit = 0;
if (!kernel_memory_limit_val(kmem_limit)) {
return false;
}
if (kmem_limit >= upper_bound) {
return (jlong)-1;
kmem_limit = value_unlimited;
}
return (jlong)kmem_limit;
result = kmem_limit;
return true;
}
jlong CgroupV1MemoryController::kernel_memory_max_usage_in_bytes() {
julong kmem_max_usage;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.max_usage_in_bytes", "Maximum Kernel Memory Usage", kmem_max_usage);
return (jlong)kmem_max_usage;
bool CgroupV1MemoryController::kernel_memory_max_usage_val(physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.max_usage_in_bytes", "Maximum Kernel Memory Usage", result);
}
void CgroupV1MemoryController::print_version_specific_info(outputStream* st, julong mem_bound) {
jlong kmem_usage = kernel_memory_usage_in_bytes();
jlong kmem_limit = kernel_memory_limit_in_bytes(mem_bound);
jlong kmem_max_usage = kernel_memory_max_usage_in_bytes();
bool CgroupV1MemoryController::kernel_memory_max_usage_in_bytes(physical_memory_size_type& result) {
physical_memory_size_type kmem_max_usage = 0;
if (!kernel_memory_max_usage_val(kmem_max_usage)) {
return false;
}
result = kmem_max_usage;
return true;
}
void CgroupV1MemoryController::print_version_specific_info(outputStream* st, physical_memory_size_type mem_bound) {
MetricResult kmem_usage;
physical_memory_size_type temp = 0;
if (kernel_memory_usage_in_bytes(temp)) {
kmem_usage.set_value(temp);
}
MetricResult kmem_limit;
temp = value_unlimited;
if (kernel_memory_limit_in_bytes(mem_bound, temp)) {
kmem_limit.set_value(temp);
}
MetricResult kmem_max_usage;
temp = 0;
if (kernel_memory_max_usage_in_bytes(temp)) {
kmem_max_usage.set_value(temp);
}
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit_in_bytes");
OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage_in_bytes");
@ -383,74 +487,114 @@ char* CgroupV1Subsystem::cpu_cpuset_memory_nodes() {
/* cpu_quota
 *
 * Return the number of microseconds per period
 * a process is guaranteed to run in the provided
 * result reference.
 *
 * return:
 *    true if the value was set in the result reference
 *    false on failure to read the number from the file;
 *    the result reference is left untouched.
 */
bool CgroupV1CpuController::cpu_quota(int& result) {
  uint64_t quota = 0;
  // Intentionally not using the macro so as to not log a
  // negative value as a large unsigned int.
  if (!reader()->read_number("/cpu.cfs_quota_us", quota)) {
    log_trace(os, container)("CPU Quota failed");
    return false;
  }
  // Cast to int since the read value might be negative
  // and we want to avoid logging -1 as a large unsigned value.
  int quota_int = static_cast<int>(quota);
  log_trace(os, container)("CPU Quota is: %d", quota_int);
  result = quota_int;
  return true;
}
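A tiny standalone sketch (hypothetical, not JDK code) of why the quota is read as an unsigned 64-bit value and then narrowed: cpu.cfs_quota_us holds -1 for "no quota", which an unsigned parse yields as UINT64_MAX, and the narrowing cast recovers the sentinel.

#include <cstdint>
#include <cstdio>

int main() {
  // "-1" in cpu.cfs_quota_us, parsed as unsigned, wraps to UINT64_MAX.
  uint64_t raw = static_cast<uint64_t>(int64_t(-1));
  // Narrowing back to int recovers -1 (two's complement wrap-around).
  int quota = static_cast<int>(raw);
  printf("raw=%llu quota=%d\n", (unsigned long long)raw, quota); // quota=-1
  return 0;
}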
bool CgroupV1CpuController::cpu_period_val(uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.cfs_period_us", "CPU Period", result);
}
bool CgroupV1CpuController::cpu_period(int& result) {
uint64_t period = value_unlimited;
if (!cpu_period_val(period)) {
return false;
}
result = static_cast<int>(period);
return true;
}
bool CgroupV1CpuController::cpu_shares_val(uint64_t& result) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.shares", "CPU Shares", result);
}
/* cpu_shares
 *
 * Return the amount of cpu shares available to the process:
 * - Share number (typically a number relative to 1024)
 * - (2048 typically expresses 2 CPUs worth of processing)
 *
 * return:
 *    true if the result has been set in the result reference
 *    false on error
 */
bool CgroupV1CpuController::cpu_shares(int& result) {
  uint64_t shares = 0;
  if (!cpu_shares_val(shares)) {
    return false;
  }
  int shares_int = static_cast<int>(shares);
  // Convert 1024 to no shares setup (-1)
  if (shares_int == 1024) {
    shares_int = -1;
  }
  result = shares_int;
  return true;
}
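Illustrative v1 values for the conversion above (example values, not from the source):

  cpu.shares = 1024 (default)  ->  result = -1    (treated as "no shares setup")
  cpu.shares = 2048            ->  result = 2048  (roughly two CPUs' worth)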
bool CgroupV1CpuacctController::cpu_usage_in_micros_val(uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpuacct.usage", "CPU Usage", result);
}

bool CgroupV1CpuacctController::cpu_usage_in_micros(uint64_t& result) {
  uint64_t cpu_usage = 0;
  if (!cpu_usage_in_micros_val(cpu_usage)) {
    return false;
  }
  // Output is in nanoseconds, convert to microseconds.
  result = static_cast<uint64_t>(cpu_usage / 1000);
  return true;
}
static
bool pids_max_val(CgroupController* ctrl, uint64_t& result) {
CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/pids.max", "Maximum number of tasks", result);
}
/* pids_max
 *
 * Return the maximum number of tasks available to the process
 * in the passed result reference (might be value_unlimited).
 *
 * return:
 *    false on error
 *    true when the result reference has been appropriately set
 */
bool CgroupV1Subsystem::pids_max(uint64_t& result) {
  if (_pids == nullptr) return false;
  uint64_t pids_val = 0;
  if (!pids_max_val(_pids, pids_val)) {
    return false;
  }
  result = pids_val;
  return true;
}
static
bool pids_current_val(CgroupController* ctrl, uint64_t& result) {
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/pids.current", "Current number of tasks", result);
}
@ -458,12 +602,15 @@ jlong CgroupV1Subsystem::pids_max() {
/* pids_current
 *
 * The number of tasks currently in the cgroup (and its descendants) of the process
 *
 * return:
 *    true if the current number of tasks has been set in the result reference
 *    false if an error occurred
 */
bool CgroupV1Subsystem::pids_current(uint64_t& result) {
  if (_pids == nullptr) return false;
  uint64_t pids_current = 0;
  if (!pids_current_val(_pids, pids_current)) {
    return false;
  }
  result = pids_current;
  return true;
}

View File

@ -73,23 +73,44 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
private:
CgroupV1Controller _reader;
CgroupV1Controller* reader() { return &_reader; }
bool read_memory_limit_val(physical_memory_size_type& result);
bool read_hierarchical_memory_limit_val(physical_memory_size_type& result);
bool read_hierarchical_mem_swap_val(physical_memory_size_type& result);
bool read_use_hierarchy_val(physical_memory_size_type& result);
bool memory_usage_val(physical_memory_size_type& result);
bool read_mem_swappiness(physical_memory_size_type& result);
bool read_mem_swap(physical_memory_size_type& result);
bool memory_soft_limit_val(physical_memory_size_type& result);
bool memory_max_usage_val(physical_memory_size_type& result);
bool kernel_memory_usage_val(physical_memory_size_type& result);
bool kernel_memory_limit_val(physical_memory_size_type& result);
bool kernel_memory_max_usage_val(physical_memory_size_type& result);
bool uses_mem_hierarchy();
public:
void set_subsystem_path(const char *cgroup_path) override {
reader()->set_subsystem_path(cgroup_path);
}
jlong read_memory_limit_in_bytes(julong upper_bound) override;
jlong memory_usage_in_bytes() override;
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_soft_limit_in_bytes(julong upper_bound) override;
jlong memory_throttle_limit_in_bytes() override;
jlong memory_max_usage_in_bytes() override;
jlong rss_usage_in_bytes() override;
jlong cache_usage_in_bytes() override;
jlong kernel_memory_usage_in_bytes();
jlong kernel_memory_limit_in_bytes(julong upper_bound);
jlong kernel_memory_max_usage_in_bytes();
void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) override;
bool memory_usage_in_bytes(physical_memory_size_type& result) override;
bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) override;
bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) override;
bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) override;
bool memory_throttle_limit_in_bytes(physical_memory_size_type& result) override;
bool memory_max_usage_in_bytes(physical_memory_size_type& result) override;
bool rss_usage_in_bytes(physical_memory_size_type& result) override;
bool cache_usage_in_bytes(physical_memory_size_type& result) override;
bool kernel_memory_usage_in_bytes(physical_memory_size_type& result);
bool kernel_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result);
bool kernel_memory_max_usage_in_bytes(physical_memory_size_type& result);
void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) override;
bool needs_hierarchy_adjustment() override {
return reader()->needs_hierarchy_adjustment();
}
@ -99,10 +120,6 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
const char* subsystem_path() override { return reader()->subsystem_path(); }
const char* mount_point() override { return reader()->mount_point(); }
const char* cgroup_path() override { return reader()->cgroup_path(); }
private:
jlong uses_mem_hierarchy();
jlong read_mem_swappiness();
jlong read_mem_swap(julong upper_memsw_bound);
public:
CgroupV1MemoryController(const CgroupV1Controller& reader)
@ -116,10 +133,12 @@ class CgroupV1CpuController final : public CgroupCpuController {
private:
CgroupV1Controller _reader;
CgroupV1Controller* reader() { return &_reader; }
bool cpu_period_val(uint64_t& result);
bool cpu_shares_val(uint64_t& result);
public:
int cpu_quota() override;
int cpu_period() override;
int cpu_shares() override;
bool cpu_quota(int& result) override;
bool cpu_period(int& result) override;
bool cpu_shares(int& result) override;
void set_subsystem_path(const char *cgroup_path) override {
reader()->set_subsystem_path(cgroup_path);
}
@ -147,8 +166,9 @@ class CgroupV1CpuacctController final : public CgroupCpuacctController {
private:
CgroupV1Controller _reader;
CgroupV1Controller* reader() { return &_reader; }
bool cpu_usage_in_micros_val(uint64_t& result);
public:
jlong cpu_usage_in_micros() override;
bool cpu_usage_in_micros(uint64_t& result) override;
void set_subsystem_path(const char *cgroup_path) override {
reader()->set_subsystem_path(cgroup_path);
}
@ -180,15 +200,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
CgroupV1Controller* pids,
CgroupV1MemoryController* memory);
jlong kernel_memory_usage_in_bytes();
jlong kernel_memory_limit_in_bytes();
jlong kernel_memory_max_usage_in_bytes();
bool kernel_memory_usage_in_bytes(physical_memory_size_type& result);
bool kernel_memory_limit_in_bytes(physical_memory_size_type& result);
bool kernel_memory_max_usage_in_bytes(physical_memory_size_type& result);
char * cpu_cpuset_cpus();
char * cpu_cpuset_memory_nodes();
char * cpu_cpuset_cpus() override;
char * cpu_cpuset_memory_nodes() override;
jlong pids_max();
jlong pids_current();
bool pids_max(uint64_t& result) override;
bool pids_current(uint64_t& result) override;
bool is_containerized();
const char * container_type() {

View File

@ -26,6 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "cgroupV2Subsystem_linux.hpp"
#include <math.h>
// Constructor
CgroupV2Controller::CgroupV2Controller(char* mount_path,
char *cgroup_path,
@ -42,43 +44,72 @@ CgroupV2Controller::CgroupV2Controller(const CgroupV2Controller& o) :
_mount_point = o._mount_point;
}
static
bool read_cpu_shares_value(CgroupV2Controller* ctrl, uint64_t& value) {
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/cpu.weight", "Raw value for CPU Shares", value);
}
/* cpu_shares
 *
 * Return the amount of cpu shares available to the process in the
 * 'result' reference.
 *
 * return:
 *    true if the result reference got updated
 *    false if there was an error
 */
bool CgroupV2CpuController::cpu_shares(int& result) {
  uint64_t shares = 0;
  bool is_ok = read_cpu_shares_value(reader(), shares);
  if (!is_ok) {
    return false;
  }
  int shares_int = static_cast<int>(shares);
  // Convert default value of 100 to no shares setup
  if (shares_int == 100) {
    log_debug(os, container)("CPU Shares is: unlimited");
    result = -1;
    return true;
  }
  // cg v2 values must be in range [1-10000]
  assert(shares_int >= 1 && shares_int <= 10000, "invariant");
  // CPU shares (OCI) value needs to get translated into
  // a proper Cgroups v2 value. See:
  // https://github.com/containers/crun/blob/1.24/crun.1.md#cpu-controller
  //
  // Use the inverse of (x == OCI value, y == cgroupsv2 value):
  // y = 10^(log2(x)^2/612 + 125/612 * log2(x) - 7.0/34.0)
  //
  // By re-arranging it to the standard quadratic form:
  // log2(x)^2 + 125 * log2(x) - (126 + 612 * log_10(y)) = 0
  //
  // Therefore, log2(x) = (-125 + sqrt(125^2 - 4 * (-(126 + 612 * log_10(y)))))/2
  //
  // As a result we have the inverse (we can discount subtraction of the
  // square root value since those values result in very small numbers and the
  // cpu shares values - OCI - are in range [2,262144]):
  //
  // x = 2^((-125 + sqrt(16129 + 2448 * log10(y)))/2)
  //
  double log_multiplicand = log10(shares_int);
  double discriminant = 16129 + 2448 * log_multiplicand;
  double square_root = sqrt(discriminant);
  double exponent = (-125 + square_root)/2;
  double scaled_val = pow(2, exponent);
  int x = (int) scaled_val;
  log_trace(os, container)("Scaled CPU shares value is: %d", x);
  // Since the scaled value is not precise, return the closest
  // multiple of PER_CPU_SHARES for a more conservative mapping
  if (x <= PER_CPU_SHARES) {
    // Don't do the multiples of PER_CPU_SHARES mapping since we
    // have a value <= PER_CPU_SHARES
    log_debug(os, container)("CPU Shares is: %d", x);
    result = x;
    return true;
  }
int f = x/PER_CPU_SHARES;
int lower_multiple = f * PER_CPU_SHARES;
@ -88,28 +119,33 @@ int CgroupV2CpuController::cpu_shares() {
x = distance_lower <= distance_upper ? lower_multiple : upper_multiple;
log_trace(os, container)("Closest multiple of %d of the CPU Shares value is: %d", PER_CPU_SHARES, x);
log_debug(os, container)("CPU Shares is: %d", x);
  result = x;
  return true;
}
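A hypothetical standalone demo of the crun formula and the inverse derived above; oci_to_weight/weight_to_oci are names invented here for illustration, not JDK code.

#include <math.h>
#include <stdio.h>

// Forward crun mapping: OCI shares x in [2,262144] -> cgroup v2 weight y in [1,10000].
static double oci_to_weight(double x) {
  double l = log2(x);
  return pow(10, l * l / 612.0 + 125.0 / 612.0 * l - 7.0 / 34.0);
}

// Inverse, exactly as derived in the comment above:
// x = 2^((-125 + sqrt(16129 + 2448 * log10(y))) / 2)
static double weight_to_oci(double y) {
  return pow(2, (-125.0 + sqrt(16129.0 + 2448.0 * log10(y))) / 2.0);
}

int main() {
  // The default weight 100 corresponds exactly to 1024 OCI shares:
  // sqrt(16129 + 2448 * log10(100)) = sqrt(21025) = 145 -> 2^((145-125)/2) = 2^10.
  printf("weight(1024) = %.2f\n", oci_to_weight(1024.0)); // 100.00
  printf("oci(100)     = %.2f\n", weight_to_oci(100.0));  // 1024.00
  return 0;
}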
/* cpu_quota
 *
 * Return the number of microseconds per period a
 * process is guaranteed to run in the passed-in 'result' reference.
 *
 * return:
 *    true if the result reference has been set
 *    false on error
 */
bool CgroupV2CpuController::cpu_quota(int& result) {
  uint64_t quota_val = 0;
  if (!reader()->read_numerical_tuple_value("/cpu.max", true /* use_first */, quota_val)) {
    return false;
  }
  int limit = -1;
  // The first tuple value read might be 'max', which maps
  // to value_unlimited. Keep that at -1.
  if (quota_val != value_unlimited) {
    limit = static_cast<int>(quota_val);
  }
  log_trace(os, container)("CPU Quota is: %d", limit);
  result = limit;
  return true;
}
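For context on the tuple read above: /cpu.max holds "<quota> <period>". Illustrative file contents and the resulting values (example values, not from the source):

  "max 100000"    -> quota_val == value_unlimited -> result = -1     (no quota)
  "50000 100000"  -> quota_val == 50000           -> result = 50000  (half a CPU per period)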
// Constructor
@ -143,80 +179,67 @@ char* CgroupV2Subsystem::cpu_cpuset_memory_nodes() {
return os::strdup(mems);
}
bool CgroupV2CpuController::cpu_period(int& result) {
  uint64_t cpu_period = 0;
  if (!reader()->read_numerical_tuple_value("/cpu.max", false /* use_first */, cpu_period)) {
    log_trace(os, container)("CPU Period failed");
    return false;
  }
  int period_int = static_cast<int>(cpu_period);
  log_trace(os, container)("CPU Period is: %d", period_int);
  result = period_int;
  return true;
}
bool CgroupV2CpuController::cpu_usage_in_micros(uint64_t& value) {
  bool is_ok = reader()->read_numerical_key_value("/cpu.stat", "usage_usec", value);
  if (!is_ok) {
    log_trace(os, container)("CPU Usage failed");
    return false;
  }
  log_trace(os, container)("CPU Usage is: " UINT64_FORMAT, value);
  return true;
}
/* memory_usage_in_bytes
 *
 * Read the amount of memory used by this cgroup and its descendants
 * into the passed-in 'value' reference.
 *
 * return:
 *    false on failure, true otherwise.
 */
bool CgroupV2MemoryController::memory_usage_in_bytes(physical_memory_size_type& value) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.current", "Memory Usage", value);
}
bool CgroupV2MemoryController::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
                                                          physical_memory_size_type& value) {
  CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.low", "Memory Soft Limit", value);
}

bool CgroupV2MemoryController::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
  CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.high", "Memory Throttle Limit", value);
}

bool CgroupV2MemoryController::memory_max_usage_in_bytes(physical_memory_size_type& value) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.peak", "Maximum Memory Usage", value);
}
bool CgroupV2MemoryController::rss_usage_in_bytes(physical_memory_size_type& value) {
  if (!reader()->read_numerical_key_value("/memory.stat", "anon", value)) {
    return false;
  }
  log_trace(os, container)("RSS usage is: " PHYS_MEM_TYPE_FORMAT, value);
  return true;
}
bool CgroupV2MemoryController::cache_usage_in_bytes(physical_memory_size_type& value) {
  if (!reader()->read_numerical_key_value("/memory.stat", "file", value)) {
    return false;
  }
  log_trace(os, container)("Cache usage is: " PHYS_MEM_TYPE_FORMAT, value);
  return true;
}
@ -224,91 +247,108 @@ jlong CgroupV2MemoryController::cache_usage_in_bytes() {
// Note that for cgroups v2 the actual limits set for swap and
// memory live in two different files, memory.swap.max and memory.max
// respectively. In order to properly report a cgroup v1 like
// compound value we need to sum the two values. Setting a swap limit
// without also setting a memory limit is not allowed.
bool CgroupV2MemoryController::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
                                                              physical_memory_size_type upper_swap_bound, /* unused in cg v2 */
                                                              physical_memory_size_type& result) {
  physical_memory_size_type swap_limit_val = 0;
  if (!reader()->read_number_handle_max("/memory.swap.max", swap_limit_val)) {
    // Some container tests rely on this trace logging to happen.
    log_trace(os, container)("Swap Limit failed");
    // swap disabled at kernel level, treat it as no swap
    physical_memory_size_type mem_limit = value_unlimited;
    if (!read_memory_limit_in_bytes(upper_mem_bound, mem_limit)) {
      return false;
    }
    result = mem_limit;
    return true;
  }
  if (swap_limit_val == value_unlimited) {
    log_trace(os, container)("Memory and Swap Limit is: Unlimited");
    result = swap_limit_val;
    return true;
  }
  log_trace(os, container)("Swap Limit is: " PHYS_MEM_TYPE_FORMAT, swap_limit_val);
  physical_memory_size_type memory_limit = 0;
  if (read_memory_limit_in_bytes(upper_mem_bound, memory_limit)) {
    assert(memory_limit != value_unlimited, "swap limit without memory limit?");
    result = memory_limit + swap_limit_val;
    log_trace(os, container)("Memory and Swap Limit is: " PHYS_MEM_TYPE_FORMAT, result);
    return true;
  } else {
    return false;
  }
}
// memory.swap.current : total amount of swap currently used by the cgroup and its descendants
static
bool memory_swap_current_value(CgroupV2Controller* ctrl, physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.swap.current", "Swap currently used", result);
}

bool CgroupV2MemoryController::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
                                                              physical_memory_size_type upper_swap_bound,
                                                              physical_memory_size_type& result) {
  physical_memory_size_type memory_usage = 0;
  if (!memory_usage_in_bytes(memory_usage)) {
    return false;
  }
  physical_memory_size_type swap_current = 0;
  if (!memory_swap_current_value(reader(), swap_current)) {
    result = memory_usage; // treat as no swap usage
    return true;
  }
  result = memory_usage + swap_current;
  return true;
}
static
bool memory_limit_value(CgroupV2Controller* ctrl, physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.max", "Memory Limit", result);
}
/* read_memory_limit_in_bytes
 *
 * Calculate the limit of available memory for this process. The result will be
 * set in the 'result' variable if the function returns true.
 *
 * return:
 *    true when the limit could be read correctly.
 *    false in case of any error.
 */
bool CgroupV2MemoryController::read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                                          physical_memory_size_type& result) {
  physical_memory_size_type limit = 0; // default unlimited
  if (!memory_limit_value(reader(), limit)) {
    log_trace(os, container)("container memory limit failed, using host value " PHYS_MEM_TYPE_FORMAT,
                             upper_bound);
    return false;
  }
  bool is_unlimited = limit == value_unlimited;
  bool exceeds_physical_mem = false;
  if (!is_unlimited && limit >= upper_bound) {
    exceeds_physical_mem = true;
  }
  if (log_is_enabled(Trace, os, container)) {
    if (!is_unlimited) {
      log_trace(os, container)("Memory Limit is: " PHYS_MEM_TYPE_FORMAT, limit);
    }
  }
  if (is_unlimited || exceeds_physical_mem) {
    if (is_unlimited) {
      log_trace(os, container)("Memory Limit is: Unlimited");
      log_trace(os, container)("container memory limit unlimited, using upper bound value " PHYS_MEM_TYPE_FORMAT, upper_bound);
    } else {
      log_trace(os, container)("container memory limit ignored: " PHYS_MEM_TYPE_FORMAT ", upper bound is " PHYS_MEM_TYPE_FORMAT,
                               limit, upper_bound);
    }
  }
  result = limit;
  return true;
}
static
bool memory_swap_limit_value(CgroupV2Controller* ctrl, physical_memory_size_type& value) {
  CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.swap.max", "Swap Limit", value);
}
void CgroupV2Controller::set_subsystem_path(const char* cgroup_path) {
@ -327,10 +367,17 @@ bool CgroupV2Controller::needs_hierarchy_adjustment() {
return strcmp(_cgroup_path, "/") != 0;
}
void CgroupV2MemoryController::print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) {
  MetricResult swap_current;
  physical_memory_size_type swap_current_val = 0;
  if (memory_swap_current_value(reader(), swap_current_val)) {
    swap_current.set_value(swap_current_val);
  }
  MetricResult swap_limit;
  physical_memory_size_type swap_limit_val = 0;
  if (memory_swap_limit_value(reader(), swap_limit_val)) {
    swap_limit.set_value(swap_limit_val);
  }
OSContainer::print_container_helper(st, swap_current, "memory_swap_current_in_bytes");
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit_in_bytes");
}
@ -346,29 +393,27 @@ char* CgroupV2Controller::construct_path(char* mount_path, const char* cgroup_pa
/* pids_max
 *
 * Calculate the maximum number of tasks available to the process. Set the
 * value in the passed-in 'value' reference. The value might be 'value_unlimited'
 * when there is no limit.
 *
 * return:
 *    true if the value has been set appropriately
 *    false if there was an error
 */
bool CgroupV2Subsystem::pids_max(uint64_t& value) {
  CONTAINER_READ_NUMBER_CHECKED_MAX(unified(), "/pids.max", "Maximum number of tasks", value);
}
/* pids_current
 *
 * The number of tasks currently in the cgroup (and its descendants) of the process.
 * Set in the passed-in 'value' reference.
 *
 * return:
 *    true on success
 *    false when there was an error
 */
bool CgroupV2Subsystem::pids_current(uint64_t& value) {
  CONTAINER_READ_NUMBER_CHECKED(unified(), "/pids.current", "Current number of tasks", value);
}

View File

@ -59,10 +59,10 @@ class CgroupV2CpuController: public CgroupCpuController {
public:
CgroupV2CpuController(const CgroupV2Controller& reader) : _reader(reader) {
}
int cpu_quota() override;
int cpu_period() override;
int cpu_shares() override;
jlong cpu_usage_in_micros();
bool cpu_quota(int& value) override;
bool cpu_period(int& value) override;
bool cpu_shares(int& value) override;
bool cpu_usage_in_micros(uint64_t& value);
bool is_read_only() override {
return reader()->is_read_only();
}
@ -87,8 +87,8 @@ class CgroupV2CpuacctController: public CgroupCpuacctController {
CgroupV2CpuacctController(CgroupV2CpuController* reader) : _reader(reader) {
}
// In cgroup v2, cpu usage is a part of the cpu controller.
jlong cpu_usage_in_micros() override {
return reader()->cpu_usage_in_micros();
bool cpu_usage_in_micros(uint64_t& result) override {
return reader()->cpu_usage_in_micros(result);
}
bool is_read_only() override {
return reader()->is_read_only();
@ -110,20 +110,27 @@ class CgroupV2MemoryController final: public CgroupMemoryController {
private:
CgroupV2Controller _reader;
CgroupV2Controller* reader() { return &_reader; }
public:
CgroupV2MemoryController(const CgroupV2Controller& reader) : _reader(reader) {
}
jlong read_memory_limit_in_bytes(julong upper_bound) override;
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_soft_limit_in_bytes(julong upper_bound) override;
jlong memory_throttle_limit_in_bytes() override;
jlong memory_usage_in_bytes() override;
jlong memory_max_usage_in_bytes() override;
jlong rss_usage_in_bytes() override;
jlong cache_usage_in_bytes() override;
void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) override;
bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) override;
bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) override;
bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) override;
bool memory_throttle_limit_in_bytes(physical_memory_size_type& result) override;
bool memory_usage_in_bytes(physical_memory_size_type& result) override;
bool memory_max_usage_in_bytes(physical_memory_size_type& result) override;
bool rss_usage_in_bytes(physical_memory_size_type& result) override;
bool cache_usage_in_bytes(physical_memory_size_type& result) override;
void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) override;
bool is_read_only() override {
return reader()->is_read_only();
}
@ -160,8 +167,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
char * cpu_cpuset_cpus() override;
char * cpu_cpuset_memory_nodes() override;
jlong pids_max() override;
jlong pids_current() override;
bool pids_max(uint64_t& result) override;
bool pids_current(uint64_t& result) override;
bool is_containerized() override;

View File

@ -84,8 +84,12 @@ void OSContainer::init() {
// We can be in one of two cases:
// 1.) On a physical Linux system without any limit
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
  physical_memory_size_type mem_limit_val = value_unlimited;
  (void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
  int host_cpus = os::Linux::active_processor_count();
  int cpus = host_cpus;
  (void)active_processor_count(cpus); // discard error and use default
  any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
reason = " because either a cpu or a memory limit is present";
} else {
@ -103,77 +107,138 @@ const char * OSContainer::container_type() {
return cgroup_subsystem->container_type();
}
bool OSContainer::memory_limit_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  physical_memory_size_type phys_mem = os::Linux::physical_memory();
  return cgroup_subsystem->memory_limit_in_bytes(phys_mem, value);
}

bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
  physical_memory_size_type mem_limit = value_unlimited;
  physical_memory_size_type mem_usage = 0;
  if (memory_limit_in_bytes(mem_limit) && memory_usage_in_bytes(mem_usage)) {
    assert(mem_usage != value_unlimited, "invariant");
    if (mem_limit != value_unlimited) {
      value = (mem_limit > mem_usage) ? mem_limit - mem_usage : 0;
      return true;
    }
  }
  log_trace(os, container)("calculating available memory in container failed");
  return false;
}
bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
memory_and_swap_limit_in_bytes(mem_swap_limit) &&
mem_limit != value_unlimited &&
mem_swap_limit != value_unlimited) {
if (mem_limit >= mem_swap_limit) {
value = 0; // no swap, thus no free swap
return true;
}
physical_memory_size_type swap_limit = mem_swap_limit - mem_limit;
physical_memory_size_type mem_swap_usage = 0;
physical_memory_size_type mem_usage = 0;
if (memory_and_swap_usage_in_bytes(mem_swap_usage) &&
memory_usage_in_bytes(mem_usage)) {
physical_memory_size_type swap_usage = value_unlimited;
if (mem_usage > mem_swap_usage) {
swap_usage = 0; // delta usage must not be negative
} else {
swap_usage = mem_swap_usage - mem_usage;
}
// free swap is based on swap limit (upper bound) and swap usage
if (swap_usage >= swap_limit) {
value = 0; // free swap must not be negative
return true;
}
value = swap_limit - swap_usage;
return true;
}
}
// unlimited or not supported. Leave an appropriate trace message
if (log_is_enabled(Trace, os, container)) {
char mem_swap_buf[25]; // uint64_t => 20 + 1, 'unlimited' => 9 + 1; 10 < 21 < 25
char mem_limit_buf[25];
int num = 0;
if (mem_swap_limit == value_unlimited) {
num = os::snprintf(mem_swap_buf, sizeof(mem_swap_buf), "%s", "unlimited");
} else {
num = os::snprintf(mem_swap_buf, sizeof(mem_swap_buf), PHYS_MEM_TYPE_FORMAT, mem_swap_limit);
}
assert(num < 25, "buffer too small");
mem_swap_buf[num] = '\0';
if (mem_limit == value_unlimited) {
num = os::snprintf(mem_limit_buf, sizeof(mem_limit_buf), "%s", "unlimited");
} else {
num = os::snprintf(mem_limit_buf, sizeof(mem_limit_buf), PHYS_MEM_TYPE_FORMAT, mem_limit);
}
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
" container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
mem_swap_buf, mem_limit_buf, host_free_swap);
}
return false;
}
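A worked example of the swap arithmetic above, with illustrative numbers (a hypothetical sketch, not JDK code):

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative limits/usages (think of each unit as 1 GiB).
  uint64_t mem_limit      = 10;  // container memory limit
  uint64_t mem_swap_limit = 15;  // combined memory+swap limit
  uint64_t mem_usage      = 8;
  uint64_t mem_swap_usage = 12;

  uint64_t swap_limit = mem_swap_limit - mem_limit;                             // 5
  uint64_t swap_usage = (mem_usage > mem_swap_usage)
                          ? 0 : mem_swap_usage - mem_usage;                     // 4
  uint64_t free_swap  = (swap_usage >= swap_limit)
                          ? 0 : swap_limit - swap_usage;                        // 1
  printf("free swap: %llu\n", (unsigned long long)free_swap);
  return 0;
}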
bool OSContainer::memory_and_swap_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
physical_memory_size_type phys_mem = os::Linux::physical_memory();
physical_memory_size_type host_swap = 0;
if (!os::Linux::host_swap(host_swap)) {
return false;
}
return cgroup_subsystem->memory_and_swap_limit_in_bytes(phys_mem, host_swap, value);
}
bool OSContainer::memory_and_swap_usage_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  physical_memory_size_type phys_mem = os::Linux::physical_memory();
  physical_memory_size_type host_swap = 0;
  if (!os::Linux::host_swap(host_swap)) {
    return false;
  }
  return cgroup_subsystem->memory_and_swap_usage_in_bytes(phys_mem, host_swap, value);
}

bool OSContainer::memory_soft_limit_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  physical_memory_size_type phys_mem = os::Linux::physical_memory();
  return cgroup_subsystem->memory_soft_limit_in_bytes(phys_mem, value);
}

bool OSContainer::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->memory_throttle_limit_in_bytes(value);
}

bool OSContainer::memory_usage_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->memory_usage_in_bytes(value);
}

bool OSContainer::memory_max_usage_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->memory_max_usage_in_bytes(value);
}

bool OSContainer::rss_usage_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->rss_usage_in_bytes(value);
}

bool OSContainer::cache_usage_in_bytes(physical_memory_size_type& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->cache_usage_in_bytes(value);
}
void OSContainer::print_version_specific_info(outputStream* st) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  physical_memory_size_type phys_mem = os::Linux::physical_memory();
cgroup_subsystem->print_version_specific_info(st, phys_mem);
}
@ -187,50 +252,55 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
bool OSContainer::active_processor_count(int& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->active_processor_count(value);
}

bool OSContainer::cpu_quota(int& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->cpu_quota(value);
}

bool OSContainer::cpu_period(int& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->cpu_period(value);
}

bool OSContainer::cpu_shares(int& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->cpu_shares(value);
}

bool OSContainer::cpu_usage_in_micros(uint64_t& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->cpu_usage_in_micros(value);
}

bool OSContainer::pids_max(uint64_t& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->pids_max(value);
}

bool OSContainer::pids_current(uint64_t& value) {
  assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
  return cgroup_subsystem->pids_current(value);
}
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
  st->print("%s: ", metrics);
  if (res.success()) {
    if (res.value() != value_unlimited) {
      if (res.value() >= 1024) {
        st->print_cr(PHYS_MEM_TYPE_FORMAT " k", (physical_memory_size_type)(res.value() / K));
      } else {
        st->print_cr(PHYS_MEM_TYPE_FORMAT, res.value());
      }
    } else {
      st->print_cr("%s", "unlimited");
    }
  } else {
    // Not supported
    st->print_cr("%s", "unavailable");
  }
}
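Sample lines this helper could emit (illustrative output only, assuming a 1 GiB limit, no soft limit, and an unreadable kernel metric):

  memory_limit_in_bytes: 1048576 k
  memory_soft_limit_in_bytes: unlimited
  kernel_memory_max_usage_in_bytes: unavailable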

View File

@ -30,11 +30,30 @@
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
#define OSCONTAINER_ERROR (-2)
// Some cgroup interface files define the value 'max' for unlimited.
// Define this constant value to indicate this value.
const uint64_t value_unlimited = std::numeric_limits<uint64_t>::max();
// 20ms timeout between re-reads of memory limit and _active_processor_count.
#define OSCONTAINER_CACHE_TIMEOUT (NANOSECS_PER_SEC/50)
// Carrier object for print_container_helper()
class MetricResult: public StackObj {
private:
static const uint64_t value_unused = 0;
bool _success = false;
physical_memory_size_type _value = value_unused;
public:
void set_value(physical_memory_size_type val) {
// having a value means success
_success = true;
_value = val;
}
bool success() { return _success; }
physical_memory_size_type value() { return _value; }
};
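A minimal usage sketch of MetricResult, mirroring the call pattern used by the printing code further below (st is assumed to be an outputStream*):

  MetricResult limit;                    // default: success() == false -> prints "unavailable"
  physical_memory_size_type v = 0;
  if (OSContainer::memory_limit_in_bytes(v)) {
    limit.set_value(v);                  // success() == true; v may still be value_unlimited
  }
  OSContainer::print_container_helper(st, limit, "memory_limit_in_bytes");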
class OSContainer: AllStatic {
private:
@ -45,36 +64,38 @@ class OSContainer: AllStatic {
public:
static void init();
static void print_version_specific_info(outputStream* st);
static void print_container_helper(outputStream* st, jlong j, const char* metrics);
static void print_container_helper(outputStream* st, MetricResult& res, const char* metrics);
static inline bool is_containerized();
static const char * container_type();
static bool available_memory_in_container(julong& value);
static jlong memory_limit_in_bytes();
static jlong memory_and_swap_limit_in_bytes();
static jlong memory_and_swap_usage_in_bytes();
static jlong memory_soft_limit_in_bytes();
static jlong memory_throttle_limit_in_bytes();
static jlong memory_usage_in_bytes();
static jlong memory_max_usage_in_bytes();
static jlong rss_usage_in_bytes();
static jlong cache_usage_in_bytes();
static bool available_memory_in_bytes(physical_memory_size_type& value);
static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
static bool memory_soft_limit_in_bytes(physical_memory_size_type& value);
static bool memory_throttle_limit_in_bytes(physical_memory_size_type& value);
static bool memory_usage_in_bytes(physical_memory_size_type& value);
static bool memory_max_usage_in_bytes(physical_memory_size_type& value);
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);
static int active_processor_count();
static bool active_processor_count(int& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
static int cpu_quota();
static int cpu_period();
static bool cpu_quota(int& value);
static bool cpu_period(int& value);
static int cpu_shares();
static bool cpu_shares(int& value);
static jlong cpu_usage_in_micros();
static bool cpu_usage_in_micros(uint64_t& value);
static jlong pids_max();
static jlong pids_current();
static bool pids_max(uint64_t& value);
static bool pids_current(uint64_t& value);
};
inline bool OSContainer::is_containerized() {

View File

@ -214,10 +214,8 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
bool os::available_memory(physical_memory_size_type& value) {
  if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
    log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
@ -225,36 +223,38 @@ bool os::available_memory(physical_memory_size_type& value) {
}
bool os::Linux::available_memory(physical_memory_size_type& value) {
  physical_memory_size_type avail_mem = 0;
  bool found_available_mem = false;

  FILE *fp = os::fopen("/proc/meminfo", "r");
  if (fp != nullptr) {
    char buf[80];
    do {
      if (fscanf(fp, "MemAvailable: " PHYS_MEM_TYPE_FORMAT " kB", &avail_mem) == 1) {
        avail_mem *= K;
        found_available_mem = true;
        break;
      }
    } while (fgets(buf, sizeof(buf), fp) != nullptr);
    fclose(fp);
  }

  // Only enter the free memory block if we
  // haven't found the available memory
  if (!found_available_mem) {
    physical_memory_size_type free_mem = 0;
    if (!free_memory(free_mem)) {
      return false;
    }
    avail_mem = free_mem;
  }

  log_trace(os)("available memory: " PHYS_MEM_TYPE_FORMAT, avail_mem);
  value = avail_mem;
  return true;
}
bool os::free_memory(physical_memory_size_type& value) {
  if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
    log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
@ -269,29 +269,26 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
if (ret != 0) {
return false;
}
  physical_memory_size_type free_mem = (physical_memory_size_type)si.freeram * si.mem_unit;
  log_trace(os)("free memory: " PHYS_MEM_TYPE_FORMAT, free_mem);
  value = free_mem;
return true;
}
bool os::total_swap_space(physical_memory_size_type& value) {
  if (OSContainer::is_containerized()) {
    physical_memory_size_type mem_swap_limit = value_unlimited;
    physical_memory_size_type memory_limit = value_unlimited;
    if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
        OSContainer::memory_limit_in_bytes(memory_limit)) {
      if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
          mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
        value = mem_swap_limit - memory_limit;
        return true;
      }
    }
  } // fallback to the host swap space if the container returned unlimited
  return Linux::host_swap(value);
}
static bool host_free_swap_f(physical_memory_size_type& value) {
@ -315,29 +312,12 @@ bool os::free_swap_space(physical_memory_size_type& value) {
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (OSContainer::is_containerized()) {
    if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
      return true;
    }
    // Fall through to use host value
    log_trace(os,container)("os::free_swap_space: containerized value unavailable"
                            " returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
value = host_free_swap_val;
return true;
@ -345,10 +325,10 @@ bool os::free_swap_space(physical_memory_size_type& value) {
physical_memory_size_type os::physical_memory() {
if (OSContainer::is_containerized()) {
    physical_memory_size_type mem_limit = value_unlimited;
    if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
      log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
      return mem_limit;
    }
}
}
@ -508,10 +488,15 @@ pid_t os::Linux::gettid() {
// Returns the amount of swap currently configured, in bytes.
// This can change at any time.
bool os::Linux::host_swap(physical_memory_size_type& value) {
  struct sysinfo si;
int ret = sysinfo(&si);
if (ret != 0) {
assert(false, "sysinfo failed in host_swap(): %s", os::strerror(errno));
return false;
}
value = static_cast<physical_memory_size_type>(si.totalswap) * si.mem_unit;
return true;
}
// Most versions of linux have a bug where the number of processors are
@ -2469,9 +2454,11 @@ bool os::Linux::print_container_info(outputStream* st) {
st->print_cr("cpu_memory_nodes: %s", p != nullptr ? p : "not supported");
free(p);
  int i = -1;
  bool supported = OSContainer::active_processor_count(i);
  st->print("active_processor_count: ");
  if (supported) {
    assert(i > 0, "must be");
if (ActiveProcessorCount > 0) {
st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
} else {
@ -2481,65 +2468,105 @@ bool os::Linux::print_container_info(outputStream* st) {
st->print_cr("not supported");
}
  supported = OSContainer::cpu_quota(i);
  st->print("cpu_quota: ");
  if (supported && i > 0) {
    st->print_cr("%d", i);
  } else {
    st->print_cr("%s", !supported ? "not supported" : "no quota");
  }

  supported = OSContainer::cpu_period(i);
  st->print("cpu_period: ");
  if (supported && i > 0) {
    st->print_cr("%d", i);
  } else {
    st->print_cr("%s", !supported ? "not supported" : "no period");
  }

  supported = OSContainer::cpu_shares(i);
  st->print("cpu_shares: ");
  if (supported && i > 0) {
    st->print_cr("%d", i);
  } else {
    st->print_cr("%s", !supported ? "not supported" : "no shares");
  }

  uint64_t j = 0;
  supported = OSContainer::cpu_usage_in_micros(j);
  st->print("cpu_usage_in_micros: ");
  if (supported && j > 0) {
    st->print_cr(UINT64_FORMAT, j);
  } else {
    st->print_cr("%s", !supported ? "not supported" : "no usage");
  }
MetricResult memory_limit;
physical_memory_size_type val = value_unlimited;
if (OSContainer::memory_limit_in_bytes(val)) {
memory_limit.set_value(val);
}
MetricResult mem_swap_limit;
val = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(val)) {
mem_swap_limit.set_value(val);
}
MetricResult mem_soft_limit;
val = value_unlimited;
if (OSContainer::memory_soft_limit_in_bytes(val)) {
mem_soft_limit.set_value(val);
}
MetricResult mem_throttle_limit;
val = value_unlimited;
if (OSContainer::memory_throttle_limit_in_bytes(val)) {
mem_throttle_limit.set_value(val);
}
MetricResult mem_usage;
val = 0;
if (OSContainer::memory_usage_in_bytes(val)) {
mem_usage.set_value(val);
}
MetricResult mem_max_usage;
val = 0;
if (OSContainer::memory_max_usage_in_bytes(val)) {
mem_max_usage.set_value(val);
}
MetricResult rss_usage;
val = 0;
if (OSContainer::rss_usage_in_bytes(val)) {
rss_usage.set_value(val);
}
MetricResult cache_usage;
val = 0;
if (OSContainer::cache_usage_in_bytes(val)) {
cache_usage.set_value(val);
}
OSContainer::print_container_helper(st, memory_limit, "memory_limit_in_bytes");
OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit_in_bytes");
OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit_in_bytes");
OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit_in_bytes");
OSContainer::print_container_helper(st, mem_usage, "memory_usage_in_bytes");
OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage_in_bytes");
OSContainer::print_container_helper(st, rss_usage, "rss_usage_in_bytes");
OSContainer::print_container_helper(st, cache_usage, "cache_usage_in_bytes");
OSContainer::print_version_specific_info(st);
j = OSContainer::pids_max();
supported = OSContainer::pids_max(j);
st->print("maximum number of tasks: ");
if (j > 0) {
st->print_cr(JLONG_FORMAT, j);
if (supported && j != value_unlimited) {
st->print_cr(UINT64_FORMAT, j);
} else {
st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
st->print_cr("%s", !supported ? "not supported" : "unlimited");
}
j = OSContainer::pids_current();
supported = OSContainer::pids_current(j);
st->print("current number of tasks: ");
if (j > 0) {
st->print_cr(JLONG_FORMAT, j);
if (supported && j > 0) {
st->print_cr(UINT64_FORMAT, j);
} else {
if (j == OSCONTAINER_ERROR) {
st->print_cr("not supported");
}
st->print_cr("%s", !supported ? "not supported" : "no current tasks");
}
return true;
@ -4643,7 +4670,7 @@ int os::Linux::active_processor_count() {
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN)
// 3. extracted from cgroup cpu subsystem (shares and quotas)
// 3. extracted from cgroup cpu subsystem (quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
@ -4660,9 +4687,8 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
int active_cpus;
if (OSContainer::is_containerized()) {
active_cpus = OSContainer::active_processor_count();
int active_cpus = -1;
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {

View File

@ -45,8 +45,6 @@ class os::Linux {
static GrowableArray<int>* _cpu_to_node;
static GrowableArray<int>* _nindex_to_node;
static julong available_memory_in_container();
protected:
static physical_memory_size_type _physical_memory;
@ -117,7 +115,7 @@ class os::Linux {
static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }
static physical_memory_size_type physical_memory() { return _physical_memory; }
static julong host_swap();
static bool host_swap(physical_memory_size_type& value);
static intptr_t* ucontext_get_sp(const ucontext_t* uc);
static intptr_t* ucontext_get_fp(const ucontext_t* uc);

View File

@ -39,7 +39,7 @@ class TypeArrayKlass;
// It also decides what Klasses must be cached in aot-initialized state.
//
// ArchiveBuilder uses [1] as roots to scan for all MetaspaceObjs that need to be cached.
// ArchiveHeapWriter uses [2] to create an image of the archived heap.
// HeapShared uses [2] to create an image of the archived heap.
//
// [1] is stored in _all_cached_classes in aotArtifactFinder.cpp.
// [2] is stored in HeapShared::archived_object_cache().

View File

@ -796,7 +796,7 @@ void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_i
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
log_region_range("heap", buffer_start, buffer_end, requested_start);

View File

@ -55,7 +55,7 @@
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
// The following are offsets from buffer_bottom()
bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
size_t AOTMappedHeapWriter::_buffer_used;
// Heap root segments
@ -74,7 +74,7 @@ AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
127, // prime number
AnyObj::C_HEAP,
@ -96,6 +96,45 @@ void AOTMappedHeapWriter::init() {
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
if (CDSConfig::old_cds_flags_used()) {
// With the old CDS workflow, we can guarantee deterministic output: given
// the same classlist file, we can generate the same static CDS archive.
// To ensure determinism, we always use the same compressed oop encoding
// (zero-based, no shift). See set_requested_address_range().
_is_writing_deterministic_heap = true;
} else {
// Deterministic output is not supported by the new AOT workflow, so
// we don't force the (zero-based, no shift) encoding. This way, it is more
// likely that we can avoid oop relocation in the production run.
_is_writing_deterministic_heap = false;
}
}
}
// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see comments
// in AOTMappedHeapWriter::set_requested_address_range().
CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
if (is_writing_deterministic_heap()) {
return CompressedOops::UnscaledNarrowOop;
} else {
return CompressedOops::mode();
}
}
address AOTMappedHeapWriter::narrow_oop_base() {
if (is_writing_deterministic_heap()) {
return (address)0;
} else {
return CompressedOops::base();
}
}
int AOTMappedHeapWriter::narrow_oop_shift() {
if (is_writing_deterministic_heap()) {
return 0;
} else {
return CompressedOops::shift();
}
}
@ -116,7 +155,7 @@ void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
set_requested_address(heap_info);
set_requested_address_range(heap_info);
relocate_embedded_oops(roots, heap_info);
}
@ -536,14 +575,55 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
return buffered_obj_offset;
}
void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
// Set the range [_requested_bottom, _requested_top), the requested address range of all
// the archived heap objects in the production run.
//
// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
//
// The archived objects are stored using the COOPS encoding of the assembly phase.
// We pick a range within the heap used by the assembly phase.
//
// In the production run, if different COOPS encodings are used:
// - The heap contents need to be relocated.
//
// (2) UseCompressedOops == true && is_writing_deterministic_heap()
//
// We always use zero-based, zero-shift encoding. The requested range is placed just below 0x100000000 (4 GB); see below.
//
// (3) UseCompressedOops == false:
//
// In the production run, the heap range is usually picked (randomly) by the OS, so we
// will almost always need to perform relocation, regardless of how we pick the requested
// address range.
//
// So we just hard code it to NOCOOPS_REQUESTED_BASE.
//
void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must have archived at least one object!");
if (UseCompressedOops) {
if (UseG1GC) {
if (is_writing_deterministic_heap()) {
// Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
// We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
// heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
// requested location to avoid relocation.
//
// For other collectors or larger heaps, relocation is unavoidable, but is usually
// quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
address heap_end = (address)0x100000000;
size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
AOTMetaspace::unrecoverable_writing_error();
}
_requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
} else if (UseG1GC) {
// For G1, pick the range at the top of the current heap. If the exact same heap sizes
// are used in the production run, it's likely that we can map the archived objects
// at the requested location to avoid relocation.
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(aot, heap)("Heap end = %p", heap_end);
_requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
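
For case (2) above, the zero-based, zero-shift choice means that encoding a requested address into a narrowOop is plain truncation to 32 bits, and decoding is the identity, because every requested address lies below 0x100000000. A minimal sketch of that arithmetic (the example address is mine, not from the patch):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t requested_addr = 0xFFF00000;         // example address below 4 GB
  const uint32_t narrow  = (uint32_t)requested_addr;  // the stored narrowOop bits
  const uint64_t decoded = ((uint64_t)narrow << 0) + 0;  // shift 0, base 0
  assert(decoded == requested_addr);                  // round-trips exactly
  return 0;
}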
@ -612,7 +692,14 @@ oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
oop request_referent = source_obj_to_requested_obj(source_referent);
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
if (UseCompressedOops && is_writing_deterministic_heap()) {
// We use zero-based, 0-shift encoding, so the narrowOop is just the lower
// 32 bits of request_referent
intptr_t addr = cast_from_oop<intptr_t>(request_referent);
*((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
} else {
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
}
if (request_referent != nullptr) {
mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}
@ -918,9 +1005,9 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
address buffer_start = address(r.start());
address buffer_end = address(r.end());
address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = CompressedOops::shift();
address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {

View File

@ -29,6 +29,7 @@
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
@ -71,7 +72,7 @@ class AOTMappedHeapWriter : AllStatic {
// These are entered into HeapShared::archived_object_cache().
//
// - "buffered objects" are copies of the "source objects", and are stored in into
// ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
// AOTMappedHeapWriter::_buffer, which is a GrowableArray that sits outside of
// the valid heap range. Therefore we avoid using the addresses of these copies
// as oops. They are usually called "buffered_addr" in the code (of the type "address").
//
@ -81,26 +82,11 @@ class AOTMappedHeapWriter : AllStatic {
// - Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
// The requested address is implemented differently depending on UseCompressedOops:
// The requested address of an archived object is essentially its buffered_addr + delta,
// where delta is (_requested_bottom - buffer_bottom());
//
// UseCompressedOops == true:
// The archived objects are stored assuming that the runtime COOPS compression
// scheme is exactly the same as in dump time (or else a more expensive runtime relocation
// would be needed.)
//
// At dump time, we assume that the runtime heap range is exactly the same as
// in dump time. The requested addresses of the archived objects are chosen such that
// they would occupy the top end of a G1 heap (TBD when dumping is supported by other
// collectors. See JDK-8298614).
//
// UseCompressedOops == false:
// At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
// need to perform relocation. Hence, the goal of the "requested address" is to ensure that
// the contents of the archived objects are deterministic. I.e., the oop fields of archived
// objects will always point to deterministic addresses.
//
// For G1, the archived heap is written such that the lowest archived object is placed
// at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614).
// The requested addresses of all archived objects are within [_requested_bottom, _requested_top).
// See AOTMappedHeapWriter::set_requested_address_range() for more info.
// ----------------------------------------------------------------------
public:
@ -111,6 +97,15 @@ public:
// Shenandoah heap region size can never be smaller than 256K.
static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K;
// The heap contents are required to be deterministic when dumping "old" CDS archives, in order
// to support reproducible lib/server/classes*.jsa when building the JDK.
static bool is_writing_deterministic_heap() { return _is_writing_deterministic_heap; }
// The oop encoding used by the archived heap objects.
static CompressedOops::Mode narrow_oop_mode();
static address narrow_oop_base();
static int narrow_oop_shift();
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
@ -121,6 +116,7 @@ private:
int _field_offset;
};
static bool _is_writing_deterministic_heap;
static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
// The number of bytes that have written into _buffer (may be smaller than _buffer->length()).
@ -130,15 +126,15 @@ private:
static HeapRootSegments _heap_root_segments;
// The address range of the requested location of the archived heap objects.
static address _requested_bottom;
static address _requested_top;
static address _requested_bottom; // The requested address of the lowest archived heap object
static address _requested_top; // The exclusive end of the highest archived heap object
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
// See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
// See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
// The objects will be written in the order of:
// _source_objs->at(_source_objs_order->at(0)._index)
// _source_objs->at(_source_objs_order->at(1)._index)
@ -200,7 +196,7 @@ private:
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
static void set_requested_address(ArchiveMappedHeapInfo* info);
static void set_requested_address_range(ArchiveMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveMappedHeapInfo* info);
static void compute_ptrmap(ArchiveMappedHeapInfo *info);

View File

@ -114,6 +114,7 @@ intx AOTMetaspace::_relocation_delta;
char* AOTMetaspace::_requested_base_address;
Array<Method*>* AOTMetaspace::_archived_method_handle_intrinsics = nullptr;
bool AOTMetaspace::_use_optimized_module_handling = true;
FileMapInfo* AOTMetaspace::_output_mapinfo = nullptr;
// The CDS archive is divided into the following regions:
// rw - read-write metadata
@ -322,6 +323,24 @@ void AOTMetaspace::initialize_for_static_dump() {
AOTMetaspace::unrecoverable_writing_error();
}
_symbol_region.init(&_symbol_rs, &_symbol_vs);
if (CDSConfig::is_dumping_preimage_static_archive()) {
// We are in the AOT training run. User code is executed.
//
// On Windows, if the user code closes System.out and we open the AOT config file for output
// only at VM exit, we might get back the same file HANDLE as stdout, and the AOT config
// file may get corrupted by UL logs. By opening early, we ensure that the output
// HANDLE is different from stdout, avoiding such corruption.
open_output_mapinfo();
} else {
// No need for the above as we won't execute any user code.
}
}
void AOTMetaspace::open_output_mapinfo() {
const char* static_archive = CDSConfig::output_archive_path();
assert(static_archive != nullptr, "sanity");
_output_mapinfo = new FileMapInfo(static_archive, true);
_output_mapinfo->open_as_output();
}
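
The hazard motivating the early open is ordinary handle reuse: once user code closes the standard output stream, the next open can be assigned the same underlying handle, and stray writes to "stdout" then land in the new file. A POSIX sketch of the same effect (illustrative only; the patch is concerned with Windows HANDLEs, and the file name here is made up):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main() {
  close(1);  // user code closes stdout (fd 1)
  int fd = open("aot.config", O_WRONLY | O_CREAT | O_TRUNC, 0644);
  // POSIX hands out the lowest free descriptor, so fd is very likely 1:
  // anything still writing to "stdout" would now corrupt aot.config.
  dprintf(2, "new fd = %d\n", fd);  // report via stderr, which is still open
  return 0;
}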
// Called by universe_post_init()
@ -655,15 +674,14 @@ private:
public:
VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b) :
VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(nullptr), _builder(b) {}
VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b, FileMapInfo* map_info) :
VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(map_info), _builder(b) {}
bool skip_operation() const { return false; }
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
FileMapInfo* map_info() const { return _map_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
@ -795,12 +813,6 @@ void VM_PopulateDumpSharedSpace::doit() {
CppVtables::zero_archived_vtables();
// Write the archive file
if (CDSConfig::is_dumping_final_static_archive()) {
FileMapInfo::free_current_info(); // FIXME: should not free current info
}
const char* static_archive = CDSConfig::output_archive_path();
assert(static_archive != nullptr, "sanity");
_map_info = new FileMapInfo(static_archive, true);
_map_info->populate_header(AOTMetaspace::core_region_alignment());
_map_info->set_early_serialized_data(early_serialized_data);
_map_info->set_serialized_data(serialized_data);
@ -1138,7 +1150,14 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
}
#endif
VM_PopulateDumpSharedSpace op(builder);
if (!CDSConfig::is_dumping_preimage_static_archive()) {
if (CDSConfig::is_dumping_final_static_archive()) {
FileMapInfo::free_current_info(); // FIXME: should not free current info
}
open_output_mapinfo();
}
VM_PopulateDumpSharedSpace op(builder, _output_mapinfo);
VMThread::execute(&op);
if (AOTCodeCache::is_on_for_dump() && CDSConfig::is_dumping_final_static_archive()) {
@ -1152,7 +1171,9 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::disable_dumping_aot_code();
}
bool status = write_static_archive(&builder, op.map_info(), op.mapped_heap_info(), op.streamed_heap_info());
bool status = write_static_archive(&builder, _output_mapinfo, op.mapped_heap_info(), op.streamed_heap_info());
assert(!_output_mapinfo->is_open(), "Must be closed already");
_output_mapinfo = nullptr;
if (status && CDSConfig::is_dumping_preimage_static_archive()) {
tty->print_cr("%s AOTConfiguration recorded: %s",
CDSConfig::has_temp_aot_config_file() ? "Temporary" : "", AOTConfiguration);
@ -1173,11 +1194,10 @@ bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();
map_info->open_as_output();
if (!map_info->is_open()) {
return false;
}
map_info->prepare_for_writing();
builder->write_archive(map_info, mapped_heap_info, streamed_heap_info);
return true;
}

View File

@ -60,6 +60,7 @@ class AOTMetaspace : AllStatic {
static char* _requested_base_address;
static bool _use_optimized_module_handling;
static Array<Method*>* _archived_method_handle_intrinsics;
static FileMapInfo* _output_mapinfo;
public:
enum {
@ -185,6 +186,7 @@ public:
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static void fork_and_dump_final_static_archive(TRAPS);
static void open_output_mapinfo();
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,

View File

@ -526,7 +526,7 @@ void CDSConfig::check_aotmode_record() {
bool has_output = !FLAG_IS_DEFAULT(AOTCacheOutput);
if (!has_output && !has_config) {
vm_exit_during_initialization("At least one of AOTCacheOutput and AOTConfiguration must be specified when using -XX:AOTMode=record");
vm_exit_during_initialization("At least one of AOTCacheOutput and AOTConfiguration must be specified when using -XX:AOTMode=record");
}
if (has_output) {

View File

@ -353,6 +353,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_as_output();
dynamic_info->prepare_for_writing();
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr);
address base = _requested_dynamic_archive_bottom;

View File

@ -216,12 +216,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_obj_alignment = ObjectAlignmentInBytes;
_compact_strings = CompactStrings;
_compact_headers = UseCompactObjectHeaders;
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
_object_streaming_mode = HeapShared::is_writing_streaming_mode();
_narrow_oop_mode = CompressedOops::mode();
_narrow_oop_base = CompressedOops::base();
_narrow_oop_shift = CompressedOops::shift();
_narrow_oop_mode = AOTMappedHeapWriter::narrow_oop_mode();
_narrow_oop_base = AOTMappedHeapWriter::narrow_oop_base();
_narrow_oop_shift = AOTMappedHeapWriter::narrow_oop_shift();
}
#endif
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
if (UseCompressedClassPointers) {
@ -777,7 +779,9 @@ void FileMapInfo::open_as_output() {
}
_fd = fd;
_file_open = true;
}
void FileMapInfo::prepare_for_writing() {
// Seek past the header. We will write the header after all regions are written
// and their CRCs computed.
size_t header_bytes = header()->header_size();
@ -911,7 +915,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
if (HeapShared::is_writing_mapping_mode()) {
requested_base = (char*)AOTMappedHeapWriter::requested_address();
if (UseCompressedOops) {
mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
mapping_offset = (size_t)((address)requested_base - AOTMappedHeapWriter::narrow_oop_base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
}
} else {

View File

@ -290,7 +290,7 @@ public:
void log_paths(const char* msg, int start_idx, int end_idx);
FileMapInfo(const char* full_apth, bool is_static);
FileMapInfo(const char* full_path, bool is_static);
~FileMapInfo();
static void free_current_info();
@ -365,6 +365,7 @@ public:
// File manipulation.
bool open_as_input() NOT_CDS_RETURN_(false);
void open_as_output();
void prepare_for_writing();
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);

View File

@ -631,9 +631,8 @@ void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
}
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that if java_mirror will be returned if it's already a
// scratch mirror.
// return the "scratch" version that represents the same type T. Note
// that java_mirror will be returned if the mirror is already a scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {

View File

@ -332,7 +332,7 @@ public:
// Used by CDSHeapVerifier.
OopHandle _orig_referrer;
// The location of this object inside ArchiveHeapWriter::_buffer
// The location of this object inside {AOTMappedHeapWriter, AOTStreamedHeapWriter}::_buffer
size_t _buffer_offset;
// One or more fields in this object are pointing to non-null oops.

View File

@ -216,9 +216,6 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
if (holder->name() == ciSymbols::java_lang_System())
// Never trust strangely unstable finals: System.out, etc.
return false;
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@ -230,15 +227,9 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// can't be serialized, so there is no hacking of finals going on with them.
if (holder->is_hidden())
return true;
// Trust final fields in all boxed classes
if (holder->is_box_klass())
return true;
// Trust final fields in records
if (holder->is_record())
return true;
// Trust final fields in String
if (holder->name() == ciSymbols::java_lang_String())
return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
@ -267,17 +258,7 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// not be constant is when the field is a *special* static & final field
// whose value may change. The three examples are java.lang.System.in,
// java.lang.System.out, and java.lang.System.err.
assert(vmClasses::System_klass() != nullptr, "Check once per vm");
if (k == vmClasses::System_klass()) {
// Check offsets for case 2: System.in, System.out, or System.err
if (_offset == java_lang_System::in_offset() ||
_offset == java_lang_System::out_offset() ||
_offset == java_lang_System::err_offset()) {
_is_constant = false;
return;
}
}
_is_constant = true;
_is_constant = !fd->is_mutable_static_final();
} else {
// An instance field can be constant if it's a final static field or if
// it's a final non-static field of a trusted class (classes in

View File

@ -605,7 +605,7 @@ bool ciInstanceKlass::is_leaf_type() {
if (is_shared()) {
return is_final(); // approximately correct
} else {
return !has_subklass() && (nof_implementors() == 0);
return !has_subklass() && (!is_interface() || nof_implementors() == 0);
}
}
@ -619,6 +619,7 @@ bool ciInstanceKlass::is_leaf_type() {
// This is OK, since any dependencies we decide to assert
// will be checked later under the Compile_lock.
ciInstanceKlass* ciInstanceKlass::implementor() {
assert(is_interface(), "required");
ciInstanceKlass* impl = _implementor;
if (impl == nullptr) {
if (is_shared()) {

View File

@ -259,6 +259,7 @@ public:
ciInstanceKlass* unique_implementor() {
assert(is_loaded(), "must be loaded");
assert(is_interface(), "must be");
ciInstanceKlass* impl = implementor();
return (impl != this ? impl : nullptr);
}

View File

@ -412,31 +412,30 @@ ClassFileStream* ClassPathImageEntry::open_stream(JavaThread* current, const cha
//
ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current, const char* name, ClassLoaderData* loader_data) {
jlong size;
JImageLocationRef location = (*JImageFindResource)(jimage_non_null(), "", get_jimage_version_string(), name, &size);
JImageLocationRef location = 0;
if (location == 0) {
TempNewSymbol class_name = SymbolTable::new_symbol(name);
TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
if (pkg_name != nullptr) {
if (!Universe::is_module_initialized()) {
location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
} else {
PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
if (package_entry != nullptr) {
ResourceMark rm(current);
// Get the module name
ModuleEntry* module = package_entry->module();
assert(module != nullptr, "Boot classLoader package missing module");
assert(module->is_named(), "Boot classLoader package is in unnamed module");
const char* module_name = module->name()->as_C_string();
if (module_name != nullptr) {
location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
}
}
}
}
if (location != 0) {
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(size);

View File

@ -1241,10 +1241,7 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
if (!k->is_array_klass()) {
// - local static final fields with initial values were initialized at dump time
// create the init_lock
typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
set_init_lock(mirror(), r);
assert(init_lock(mirror()) != nullptr, "allocated during AOT assembly");
if (protection_domain.not_null()) {
set_protection_domain(mirror(), protection_domain());
@ -1336,11 +1333,6 @@ void java_lang_Class::set_class_data(oop java_class, oop class_data) {
java_class->obj_field_put(_classData_offset, class_data);
}
void java_lang_Class::set_reflection_data(oop java_class, oop reflection_data) {
assert(_reflectionData_offset != 0, "must be set");
java_class->obj_field_put(_reflectionData_offset, reflection_data);
}
void java_lang_Class::set_class_loader(oop java_class, oop loader) {
assert(_class_loader_offset != 0, "offsets should have been initialized");
java_class->obj_field_put(_class_loader_offset, loader);
@ -1483,7 +1475,6 @@ Klass* java_lang_Class::array_klass_acquire(oop java_class) {
return k;
}
void java_lang_Class::release_set_array_klass(oop java_class, Klass* klass) {
assert(klass->is_klass() && klass->is_array_klass(), "should be array klass");
java_class->release_metadata_field_put(_array_klass_offset, klass);
@ -1589,11 +1580,6 @@ void java_lang_Class::set_modifiers(oop the_class_mirror, u2 value) {
the_class_mirror->char_field_put(_modifiers_offset, value);
}
int java_lang_Class::raw_access_flags(oop the_class_mirror) {
assert(_raw_access_flags_offset != 0, "offsets should have been initialized");
return the_class_mirror->char_field(_raw_access_flags_offset);
}
void java_lang_Class::set_raw_access_flags(oop the_class_mirror, u2 value) {
assert(_raw_access_flags_offset != 0, "offsets should have been initialized");
the_class_mirror->char_field_put(_raw_access_flags_offset, value);

View File

@ -273,6 +273,12 @@ class java_lang_Class : AllStatic {
static void initialize_mirror_fields(InstanceKlass* ik, Handle mirror, Handle protection_domain,
Handle classData, TRAPS);
static void set_mirror_module_field(JavaThread* current, Klass* K, Handle mirror, Handle module);
static void set_modifiers(oop java_class, u2 value);
static void set_raw_access_flags(oop java_class, u2 value);
static void set_is_primitive(oop java_class);
static void release_set_array_klass(oop java_class, Klass* klass);
public:
static void allocate_fixup_lists();
static void compute_offsets();
@ -307,12 +313,10 @@ class java_lang_Class : AllStatic {
static bool is_instance(oop obj);
static bool is_primitive(oop java_class);
static void set_is_primitive(oop java_class);
static BasicType primitive_type(oop java_class);
static oop primitive_mirror(BasicType t);
// JVM_NewArray support
static Klass* array_klass_acquire(oop java_class);
static void release_set_array_klass(oop java_class, Klass* klass);
// compiler support for class operations
static int klass_offset() { CHECK_INIT(_klass_offset); }
static int array_klass_offset() { CHECK_INIT(_array_klass_offset); }
@ -331,7 +335,6 @@ class java_lang_Class : AllStatic {
static objArrayOop signers(oop java_class);
static oop class_data(oop java_class);
static void set_class_data(oop java_class, oop classData);
static void set_reflection_data(oop java_class, oop reflection_data);
static int reflection_data_offset() { return _reflectionData_offset; }
static oop class_loader(oop java_class);
@ -344,10 +347,6 @@ class java_lang_Class : AllStatic {
static void set_source_file(oop java_class, oop source_file);
static int modifiers(oop java_class);
static void set_modifiers(oop java_class, u2 value);
static int raw_access_flags(oop java_class);
static void set_raw_access_flags(oop java_class, u2 value);
static size_t oop_size(oop java_class);
static void set_oop_size(HeapWord* java_class, size_t size);

View File

@ -26,6 +26,7 @@
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@ -37,8 +38,6 @@
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
#include <new>
const RelocationHolder RelocationHolder::none; // its type is relocInfo::none

View File

@ -25,6 +25,7 @@
#ifndef SHARE_CODE_RELOCINFO_HPP
#define SHARE_CODE_RELOCINFO_HPP
#include "cppstdlib/new.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/osInfo.hpp"
@ -32,8 +33,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include <new>
class CodeBlob;
class Metadata;
class NativeMovConstReg;

View File

@ -0,0 +1,154 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CPPSTDLIB_NEW_HPP
#define SHARE_CPPSTDLIB_NEW_HPP
#include "utilities/compilerWarnings.hpp"
// HotSpot usage:
// Only the following may be used:
// * std::nothrow_t, std::nothrow
// * std::align_val_t
// * The non-allocating forms of `operator new` and `operator new[]` are
// implicitly used by the corresponding `new` and `new[]` expressions.
// - operator new(size_t, void*) noexcept
// - operator new[](size_t, void*) noexcept
// Note that the non-allocating forms of `operator delete` and `operator
// delete[]` are not used, since they are only invoked by a placement new
// expression that fails by throwing an exception. But they might still
// end up being referenced in such a situation.
BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
#include "utilities/vmassert_uninstall.hpp"
#include <new>
#include "utilities/vmassert_reinstall.hpp" // don't reorder
END_ALLOW_FORBIDDEN_FUNCTIONS
// Deprecation declarations to forbid use of the default global allocator.
// See C++17 21.6.1 Header <new> synopsis.
namespace std {
#if 0
// We could deprecate exception types, for completeness, but don't bother. We
// already have exceptions disabled, and run into compiler bugs when we try.
//
// gcc -Wattributes => type attributes ignored after type is already defined
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=122167
//
// clang -Wignored-attributes => attribute declaration must precede definition
// The clang warning is https://github.com/llvm/llvm-project/issues/135481,
// which should be fixed in clang 21.
class [[deprecated]] bad_alloc;
class [[deprecated]] bad_array_new_length;
#endif // #if 0
// Forbid new_handler manipulation by HotSpot code, leaving it untouched for
// use by application code.
[[deprecated]] new_handler get_new_handler() noexcept;
[[deprecated]] new_handler set_new_handler(new_handler) noexcept;
// Prefer HotSpot mechanisms for padding.
//
// The syntax for redeclaring these for deprecation is tricky, and not
// supported by some versions of some compilers. Dispatch on compiler and
// version to decide whether to redeclare deprecated.
#if defined(__clang__)
#if __clang_major__ >= 19
// clang18 and earlier may accept the declaration but go wrong with uses.
// Different warnings and link-time failures are both possible.
#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
#endif // restrict clang version
#elif defined(__GNUC__)
#if (__GNUC__ > 13) || (__GNUC__ == 13 && __GNUC_MINOR__ >= 2)
// g++11.5 accepts the declaration and reports deprecation for uses, but also
// has link-time failure for uses. Haven't tested intermediate versions.
#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
#endif // restrict gcc version
#elif defined(_MSC_VER)
// VS2022-17.13.2 => error C2370: '...': redefinition; different storage class
#endif // Compiler dispatch
// Redeclare deprecated if such is supported.
#ifdef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
[[deprecated]] extern const size_t hardware_destructive_interference_size;
[[deprecated]] extern const size_t hardware_constructive_interference_size;
#undef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
#endif // CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
} // namespace std
// Forbid using the global allocator by HotSpot code.
// This doesn't provide complete coverage. Some global allocation and
// deallocation functions are implicitly declared in all translation units,
// without needing to include <new>; see C++17 6.7.4. So this doesn't remove
// the need for the link-time verification that these functions aren't used.
//
// But don't poison them when compiling gtests. The gtest framework, the
// HotSpot wrapper around it (gtestMain.cpp), and even some tests, all have
// new/new[] and delete/delete[] expressions that use the default global
// allocator. We also don't apply the link-time check for gtests, for the
// same reason.
#ifndef HOTSPOT_GTEST
[[deprecated]] void* operator new(std::size_t);
[[deprecated]] void* operator new(std::size_t, std::align_val_t);
[[deprecated]] void* operator new(std::size_t, const std::nothrow_t&) noexcept;
[[deprecated]] void* operator new(std::size_t, std::align_val_t,
const std::nothrow_t&) noexcept;
[[deprecated]] void operator delete(void*) noexcept;
[[deprecated]] void operator delete(void*, std::size_t) noexcept;
[[deprecated]] void operator delete(void*, std::align_val_t) noexcept;
[[deprecated]] void operator delete(void*, std::size_t, std::align_val_t) noexcept;
[[deprecated]] void operator delete(void*, const std::nothrow_t&) noexcept;
[[deprecated]] void operator delete(void*, std::align_val_t,
const std::nothrow_t&) noexcept;
[[deprecated]] void* operator new[](std::size_t);
[[deprecated]] void* operator new[](std::size_t, std::align_val_t);
[[deprecated]] void* operator new[](std::size_t, const std::nothrow_t&) noexcept;
[[deprecated]] void* operator new[](std::size_t, std::align_val_t,
const std::nothrow_t&) noexcept;
[[deprecated]] void operator delete[](void*) noexcept;
[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
[[deprecated]] void operator delete[](void*, std::align_val_t) noexcept;
[[deprecated]] void operator delete[](void*, std::size_t, std::align_val_t) noexcept;
[[deprecated]] void operator delete[](void*, const std::nothrow_t&) noexcept;
[[deprecated]] void operator delete[](void*, std::align_val_t,
const std::nothrow_t&) noexcept;
#endif // HOTSPOT_GTEST
// Allow (don't poison) the non-allocating forms from [new.delete.placement].
#endif // SHARE_CPPSTDLIB_NEW_HPP
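
A sketch of what this header catches, assuming a HotSpot translation unit compiled without HOTSPOT_GTEST and with deprecation warnings enabled (the function itself is hypothetical):

#include "cppstdlib/new.hpp"

void example() {
  int* p = new int(42);  // warning: global 'operator new' is deprecated
  delete p;              // warning: global 'operator delete' is deprecated

  // The non-allocating placement forms remain usable:
  alignas(int) unsigned char buf[sizeof(int)];
  int* q = new (buf) int(7);  // OK: operator new(size_t, void*) is not poisoned
  (void)q;
}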

View File

@ -123,6 +123,14 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
}
}
size_t G1Allocator::free_bytes_in_retained_old_region() const {
if (_retained_old_gc_alloc_region == nullptr) {
return 0;
} else {
return _retained_old_gc_alloc_region->free();
}
}
void G1Allocator::init_gc_alloc_regions(G1EvacInfo* evacuation_info) {
assert_at_safepoint_on_vm_thread();

View File

@ -103,7 +103,10 @@ public:
void init_gc_alloc_regions(G1EvacInfo* evacuation_info);
void release_gc_alloc_regions(G1EvacInfo* evacuation_info);
void abandon_gc_alloc_regions();
bool is_retained_old_region(G1HeapRegion* hr);
// Return the amount of free bytes in the current retained old region.
size_t free_bytes_in_retained_old_region() const;
// Node index of current thread.
inline uint current_node_index() const;

View File

@ -2964,8 +2964,8 @@ void G1CollectedHeap::abandon_collection_set() {
}
size_t G1CollectedHeap::non_young_occupancy_after_allocation(size_t allocation_word_size) {
// For simplicity, just count whole regions.
const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes;
const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes -
_allocator->free_bytes_in_retained_old_region();
// Humongous allocations will always be assigned to non-young heap, so consider
// that allocation in the result as well. Otherwise the allocation will always
// be in young gen, so there is no need to account it here.
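
A worked example of the refined accounting, with assumed figures: given 8 MB regions, 10 old plus 2 humongous regions, and 3 MB still free in the retained old region, the estimate drops from 96 MB to 93 MB, no longer over-counting the retained region's unused tail:

#include <cstddef>

int main() {
  const size_t M = 1024 * 1024;
  const size_t region_size   = 8 * M;                    // G1HeapRegion::GrainBytes
  const size_t whole_regions = (10 + 2) * region_size;   // old + humongous regions
  const size_t retained_free = 3 * M;                    // retained old region free()
  const size_t occupancy     = whole_regions - retained_free;  // 93 MB, not 96 MB
  return occupancy == 93 * M ? 0 : 1;
}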

View File

@ -787,23 +787,13 @@ void G1HeapRegion::fill_range_with_dead_objects(HeapWord* start, HeapWord* end)
// possible that there is a pinned object that is not any more referenced by
// Java code (only by native).
//
// In this case we must not zap contents of such an array but we can overwrite
// the header; since only pinned typearrays are allowed, this fits nicely with
// putting filler arrays into the dead range as the object header sizes match and
// no user data is overwritten.
// In this case we should not zap, because that would overwrite
// user-observable data. Memory corresponding to the object header is safe to
// change, since it is not directly user-observable.
//
// In particular String Deduplication might change the reference to the character
// array of the j.l.String after native code obtained a raw reference to it (via
// GetStringCritical()).
CollectedHeap::fill_with_objects(start, range_size, !has_pinned_objects());
HeapWord* current = start;
do {
// Update the BOT if the a threshold is crossed.
size_t obj_size = cast_to_oop(current)->size();
update_bot_for_block(current, current + obj_size);
// Advance to the next object.
current += obj_size;
guarantee(current <= end, "Should never go past end");
} while (current != end);
CollectedHeap::fill_with_object(start, range_size, !has_pinned_objects());
update_bot_for_block(start, start + range_size);
}

View File

@ -25,47 +25,95 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
_use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) {
precond(!FLAG_IS_ERGO(NewRatio));
precond(!FLAG_IS_ERGO(NewSize));
precond(!FLAG_IS_ERGO(MaxNewSize));
// Figure out compatible young gen sizing policies.
// This will use either all defaults, NewRatio, or a combination of NewSize and
// MaxNewSize. If both ratio and size are user-specified, NewRatio will be ignored.
const bool user_specified_NewRatio = !FLAG_IS_DEFAULT(NewRatio);
const bool user_specified_NewSize = !FLAG_IS_DEFAULT(NewSize);
const bool user_specified_MaxNewSize = !FLAG_IS_DEFAULT(MaxNewSize);
// MaxNewSize is updated every time the heap is resized (and when initialized);
// as such, the value of MaxNewSize is only modified here if it is also used for
// young generation sizing (i.e., if MaxNewSize is user-specified).
if (!user_specified_NewRatio && !user_specified_NewSize && !user_specified_MaxNewSize) {
// Using Defaults.
return;
}
if (user_specified_NewRatio && !user_specified_NewSize && !user_specified_MaxNewSize) {
// Using NewRatio.
_sizer_kind = SizerNewRatio;
_use_adaptive_sizing = false;
return;
}
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
_sizer_kind = SizerNewRatio;
_use_adaptive_sizing = false;
return;
// NewRatio is ignored at this point; issue a warning if it was specified
// on the command line.
log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
}
assert(!FLAG_IS_DEFAULT(InitialHeapSize), "Initial heap size must be selected");
if (user_specified_NewSize && NewSize > InitialHeapSize) {
// If the user-specified NewSize is larger than InitialHeapSize, truncate the value.
if (FLAG_IS_CMDLINE(NewSize)) {
log_warning(gc, ergo)("NewSize (%zuk) is greater than the initial heap size (%zuk). "
"A new NewSize of %zuk will be used.",
NewSize/K, InitialHeapSize/K, InitialHeapSize/K);
}
FLAG_SET_ERGO(NewSize, InitialHeapSize);
}
assert(!FLAG_IS_DEFAULT(MaxHeapSize), "Max heap size must be selected");
if (user_specified_MaxNewSize && MaxNewSize > MaxHeapSize) {
// If the user-specified MaxNewSize is larger than MaxHeapSize, truncate the value.
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("MaxNewSize (%zuk) greater than the entire heap (%zuk). "
"A new MaxNewSize of %zuk will be used.",
MaxNewSize/K, MaxHeapSize/K, MaxHeapSize/K);
}
FLAG_SET_ERGO(MaxNewSize, MaxHeapSize);
}
if (NewSize > MaxNewSize) {
// Either NewSize, MaxNewSize, or both have been specified and are incompatible.
// In either case, set MaxNewSize to the value of NewSize.
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("NewSize (%zuk) is greater than the MaxNewSize (%zuk). "
"A new max generation size of %zuk will be used.",
log_warning(gc, ergo)("NewSize (%zuk) is greater than MaxNewSize (%zuk). "
"A new MaxNewSize of %zuk will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
FLAG_SET_ERGO(MaxNewSize, NewSize);
}
if (FLAG_IS_CMDLINE(NewSize)) {
_min_desired_young_length = MAX2((uint) (NewSize / G1HeapRegion::GrainBytes),
1U);
if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_length =
MAX2((uint) (MaxNewSize / G1HeapRegion::GrainBytes),
1U);
_sizer_kind = SizerMaxAndNewSize;
_use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
} else {
_sizer_kind = SizerNewSizeOnly;
}
} else if (FLAG_IS_CMDLINE(MaxNewSize)) {
_max_desired_young_length =
MAX2((uint) (MaxNewSize / G1HeapRegion::GrainBytes),
1U);
if (user_specified_NewSize) {
_min_desired_young_length = MAX2((uint)(NewSize / G1HeapRegion::GrainBytes), 1U);
}
if (user_specified_MaxNewSize) {
_max_desired_young_length = MAX2((uint)(MaxNewSize / G1HeapRegion::GrainBytes), 1U);
}
if (user_specified_NewSize && user_specified_MaxNewSize) {
_sizer_kind = SizerMaxAndNewSize;
_use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
} else if (user_specified_NewSize) {
_sizer_kind = SizerNewSizeOnly;
} else {
postcond(user_specified_MaxNewSize);
_sizer_kind = SizerMaxNewSizeOnly;
}
}
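
The rewritten constructor reduces to a small decision table over which young-gen flags the user set; this summary of the outcomes is mine, derived from the code above:

// (no young-gen flags)                   -> SizerDefaults, adaptive sizing on
// -XX:NewRatio=N                         -> SizerNewRatio, adaptive sizing off
// -XX:NewSize=S                          -> SizerNewSizeOnly (min length from S)
// -XX:MaxNewSize=M                       -> SizerMaxNewSizeOnly (max length from M)
// -XX:NewSize=S -XX:MaxNewSize=M         -> SizerMaxAndNewSize, adaptive only if
//                                           the derived min and max lengths differ
// -XX:NewRatio=N plus NewSize/MaxNewSize -> NewRatio ignored, warning logged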

View File

@ -180,19 +180,6 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
return AtomicAccess::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}
// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
// This method can be invoked either outside of safepoint by java threads or
// in safepoint by gc workers. Such accesses are synchronized by holding one
// of the following locks.
assert(Heap_lock->is_locked() || PSOldGenExpand_lock->is_locked(), "precondition");
// Holding the lock means end is stable. So while top may be advancing
// via concurrent allocations, there is no need to order the reads of top
// and end here, unlike in cas_allocate.
return pointer_delta(end(), top()) < word_size;
}
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
HeapWord* obj_addr = bottom();
HeapWord* t = top();

View File

@ -127,11 +127,6 @@ public:
virtual HeapWord* cas_allocate(size_t word_size);
// Optional deallocation. Used in NUMA-allocator.
bool cas_deallocate(HeapWord *obj, size_t size);
// Return true if this space needs to be expanded in order to satisfy an
// allocation request of the indicated size. Concurrent allocations and
// resizes may change the result of a later call. Used by oldgen allocator.
// precondition: holding PSOldGenExpand_lock if not VM thread
bool needs_expand(size_t word_size) const;
// Iteration.
void oop_iterate(OopIterateClosure* cl);

View File

@ -370,6 +370,55 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
}
bool ParallelScavengeHeap::should_attempt_young_gc() const {
const bool ShouldRunYoungGC = true;
const bool ShouldRunFullGC = false;
if (!_young_gen->to_space()->is_empty()) {
log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
return ShouldRunFullGC;
}
// Check if the predicted promoted bytes will overflow free space in old-gen.
PSAdaptiveSizePolicy* policy = _size_policy;
size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
// Total free size after possible old gen expansion
size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
(size_t) policy->average_promoted_in_bytes(),
(size_t) policy->padded_average_promoted_in_bytes());
if (promotion_estimate >= free_in_old_gen_with_expansion) {
log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
promotion_estimate, free_in_old_gen_with_expansion);
return ShouldRunFullGC;
}
if (UseAdaptiveSizePolicy) {
// Also check that the OS has enough free memory to commit and expand old-gen.
// Otherwise, the recorded gc-pause-time might be inflated by the time the OS
// spends preparing free memory, resulting in inaccurate young-gen resizing.
assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
// Use uint64_t instead of size_t for 32bit compatibility.
uint64_t free_mem_in_os;
if (os::free_memory(free_mem_in_os)) {
size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
(uint64_t)SIZE_MAX);
if (promotion_estimate > actual_free) {
log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
promotion_estimate, actual_free);
return ShouldRunFullGC;
}
}
}
// No particular reason to run a full GC, so run a young GC.
return ShouldRunYoungGC;
}
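
Concretely, with assumed figures: a padded average promotion of 300 MB capped by 200 MB of young-gen usage gives a 200 MB estimate; if old-gen can offer at most 150 MB even after expansion, the young GC is skipped in favor of a full GC:

#include <algorithm>
#include <cstddef>

int main() {
  const size_t M = 1024 * 1024;
  const size_t avg_promoted        = 300 * M;  // padded_average_promoted_in_bytes()
  const size_t young_used          = 200 * M;  // young_gen->used_in_bytes()
  const size_t promotion_estimate  = std::min(avg_promoted, young_used);  // 200 MB
  const size_t free_with_expansion = 150 * M;  // max_gen_size() - used_in_bytes()
  const bool run_full_gc = promotion_estimate >= free_with_expansion;     // true
  return run_full_gc ? 0 : 1;
}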
static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
}
@ -403,7 +452,16 @@ bool ParallelScavengeHeap::check_gc_overhead_limit() {
}
HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
#ifdef ASSERT
assert(Heap_lock->is_locked(), "precondition");
if (is_init_completed()) {
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
assert(Thread::current()->is_VM_thread(), "precondition");
} else {
assert(Thread::current()->is_Java_thread(), "precondition");
assert(Heap_lock->owned_by_self(), "precondition");
}
#endif
HeapWord* result = young_gen()->expand_and_allocate(size);
@ -507,17 +565,18 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
VMThread::execute(&op);
}
void ParallelScavengeHeap::collect_at_safepoint(bool full) {
void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
assert(!GCLocker::is_active(), "precondition");
bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
if (!full) {
bool success = PSScavenge::invoke(clear_soft_refs);
if (success) {
if (!is_full && should_attempt_young_gc()) {
bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
if (young_gc_success) {
return;
}
// Upgrade to Full-GC if young-gc fails
log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
}
const bool should_do_max_compaction = false;
PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
}

View File

@ -119,6 +119,9 @@ class ParallelScavengeHeap : public CollectedHeap {
void print_tracing_info() const override;
void stop() override {};
// Returns true if a young GC should be attempted, false if a full GC is preferred.
bool should_attempt_young_gc() const;
public:
ParallelScavengeHeap() :
CollectedHeap(),

View File

@ -33,6 +33,7 @@
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"
@ -118,13 +119,22 @@ void PSOldGen::initialize_performance_counters() {
}
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
#ifdef ASSERT
assert(Heap_lock->is_locked(), "precondition");
if (is_init_completed()) {
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
assert(Thread::current()->is_VM_thread(), "precondition");
} else {
assert(Thread::current()->is_Java_thread(), "precondition");
assert(Heap_lock->owned_by_self(), "precondition");
}
#endif
if (object_space()->needs_expand(word_size)) {
if (pointer_delta(object_space()->end(), object_space()->top()) < word_size) {
expand(word_size*HeapWordSize);
}
// Reuse the CAS API even though this is VM thread in safepoint. This method
// Reuse the CAS API even though this is in a critical section. This method
// is not invoked repeatedly, so the CAS overhead should be negligible.
return cas_allocate_noexpand(word_size);
}
@ -168,7 +178,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
// true until we expand, since we have the lock. Other threads may take
// the space we need before we can allocate it, regardless of whether we
// expand. That's okay, we'll just try expanding again.
if (object_space()->needs_expand(word_size)) {
if (pointer_delta(object_space()->end(), object_space()->top()) < word_size) {
result = expand(word_size*HeapWordSize);
}
}
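
The inlined test that replaces MutableSpace::needs_expand() is just a word-granularity free-space comparison; a stand-alone sketch of the equivalent computation (the pointer_delta stand-in and the sizes are mine):

#include <cassert>
#include <cstddef>

// Stand-in for HotSpot's pointer_delta(): distance in HeapWords.
static size_t pointer_delta_words(const void* left, const void* right) {
  return (size_t)((const char*)left - (const char*)right) / sizeof(void*);
}

int main() {
  void* space[16];
  void** top = space + 10;     // 6 words still free
  void** end = space + 16;
  const size_t word_size = 8;  // requested allocation, in words
  const bool needs_expand = pointer_delta_words(end, top) < word_size;
  assert(needs_expand);        // 6 < 8: expand before allocating
  return 0;
}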
@ -192,10 +202,21 @@ void PSOldGen::try_expand_till_size(size_t target_capacity_bytes) {
bool PSOldGen::expand(size_t bytes) {
#ifdef ASSERT
if (!Thread::current()->is_VM_thread()) {
assert_lock_strong(PSOldGenExpand_lock);
// During startup (is_init_completed() == false), expansion can occur for
// 1. Java threads invoking heap allocation (using Heap_lock)
// 2. CDS construction by a single thread (takes PSOldGenExpand_lock, though not strictly needed)
//
// After startup (is_init_completed() == true), expansion can occur for
// 1. GC workers for promoting to old-gen (using PSOldGenExpand_lock)
// 2. VM thread to satisfy the pending allocation
// Both cases occur inside a safepoint pause, but never overlap.
//
if (is_init_completed()) {
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
assert(Thread::current()->is_VM_thread() || PSOldGenExpand_lock->owned_by_self(), "precondition");
} else {
assert(Heap_lock->owned_by_self() || PSOldGenExpand_lock->owned_by_self(), "precondition");
}
assert_locked_or_safepoint(Heap_lock);
assert(bytes > 0, "precondition");
#endif
const size_t remaining_bytes = virtual_space()->uncommitted_size();

View File

@ -313,12 +313,6 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
// Check for potential problems.
if (!should_attempt_scavenge()) {
log_info(gc, ergo)("Young-gc might fail so skipping");
return false;
}
IsSTWGCActiveMark mark;
_gc_timer.register_gc_start();
@ -336,8 +330,7 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
PSOldGen* old_gen = heap->old_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
assert(young_gen->to_space()->is_empty(),
"Attempt to scavenge with live objects in to_space");
assert(young_gen->to_space()->is_empty(), "precondition");
heap->increment_total_collections();
@ -520,59 +513,6 @@ void PSScavenge::clean_up_failed_promotion() {
NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}
bool PSScavenge::should_attempt_scavenge() {
const bool ShouldRunYoungGC = true;
const bool ShouldRunFullGC = false;
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
PSYoungGen* young_gen = heap->young_gen();
PSOldGen* old_gen = heap->old_gen();
if (!young_gen->to_space()->is_empty()) {
log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
return ShouldRunFullGC;
}
// Check if the predicted promoted bytes will overflow free space in old-gen.
PSAdaptiveSizePolicy* policy = heap->size_policy();
size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
// Total free size after possible old gen expansion
size_t free_in_old_gen_with_expansion = old_gen->max_gen_size() - old_gen->used_in_bytes();
log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
(size_t) policy->average_promoted_in_bytes(),
(size_t) policy->padded_average_promoted_in_bytes());
if (promotion_estimate >= free_in_old_gen_with_expansion) {
log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
promotion_estimate, free_in_old_gen_with_expansion);
return ShouldRunFullGC;
}
if (UseAdaptiveSizePolicy) {
// Also checking OS has enough free memory to commit and expand old-gen.
// Otherwise, the recorded gc-pause-time might be inflated to include time
// of OS preparing free memory, resulting in inaccurate young-gen resizing.
assert(old_gen->committed().byte_size() >= old_gen->used_in_bytes(), "inv");
// Use uint64_t instead of size_t for 32bit compatibility.
uint64_t free_mem_in_os;
if (os::free_memory(free_mem_in_os)) {
size_t actual_free = (size_t)MIN2(old_gen->committed().byte_size() - old_gen->used_in_bytes() + free_mem_in_os,
(uint64_t)SIZE_MAX);
if (promotion_estimate > actual_free) {
log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
promotion_estimate, actual_free);
return ShouldRunFullGC;
}
}
}
// No particular reasons to run full-gc, so young-gc.
return ShouldRunYoungGC;
}
// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;

View File

@ -64,8 +64,6 @@ class PSScavenge: AllStatic {
static void clean_up_failed_promotion();
static bool should_attempt_scavenge();
// Private accessors
static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }

View File

@ -22,12 +22,11 @@
*
*/
#include "cppstdlib/new.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
#include <new>
BufferNode::AllocatorConfig::AllocatorConfig(size_t size)
: _buffer_capacity(size)
{

View File

@ -28,7 +28,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@ -122,7 +122,7 @@ OopStorage::ActiveArray::ActiveArray(size_t size) :
{}
OopStorage::ActiveArray::~ActiveArray() {
assert(_refcount == 0, "precondition");
assert(_refcount.load_relaxed() == 0, "precondition");
}
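The recurring change throughout this file swaps volatile fields accessed via AtomicAccess free functions for the Atomic<T> wrapper's member functions, making the intended memory ordering explicit at each access site. A minimal before/after sketch of the idiom (field name hypothetical):

    // Before: raw volatile field plus free functions
    volatile int _count;
    int v = AtomicAccess::load_acquire(&_count);
    AtomicAccess::add(&_count, 1);

    // After: Atomic<T> wrapper with named-ordering member functions
    Atomic<int> _count;
    int v2 = _count.load_acquire();
    _count.add_then_fetch(1);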
OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size,
@ -144,32 +144,32 @@ size_t OopStorage::ActiveArray::size() const {
}
size_t OopStorage::ActiveArray::block_count() const {
return _block_count;
return _block_count.load_relaxed();
}
size_t OopStorage::ActiveArray::block_count_acquire() const {
return AtomicAccess::load_acquire(&_block_count);
return _block_count.load_acquire();
}
void OopStorage::ActiveArray::increment_refcount() const {
int new_value = AtomicAccess::add(&_refcount, 1);
assert(new_value >= 1, "negative refcount %d", new_value - 1);
int old_value = _refcount.fetch_then_add(1);
assert(old_value >= 0, "negative refcount %d", old_value);
}
bool OopStorage::ActiveArray::decrement_refcount() const {
int new_value = AtomicAccess::sub(&_refcount, 1);
int new_value = _refcount.sub_then_fetch(1);
assert(new_value >= 0, "negative refcount %d", new_value);
return new_value == 0;
}
bool OopStorage::ActiveArray::push(Block* block) {
size_t index = _block_count;
size_t index = _block_count.load_relaxed();
if (index < _size) {
block->set_active_index(index);
*block_ptr(index) = block;
// Use a release_store to ensure all the setup is complete before
// making the block visible.
AtomicAccess::release_store(&_block_count, index + 1);
_block_count.release_store(index + 1);
return true;
} else {
return false;
@ -177,19 +177,19 @@ bool OopStorage::ActiveArray::push(Block* block) {
}
void OopStorage::ActiveArray::remove(Block* block) {
assert(_block_count > 0, "array is empty");
assert(_block_count.load_relaxed() > 0, "array is empty");
size_t index = block->active_index();
assert(*block_ptr(index) == block, "block not present");
size_t last_index = _block_count - 1;
size_t last_index = _block_count.load_relaxed() - 1;
Block* last_block = *block_ptr(last_index);
last_block->set_active_index(index);
*block_ptr(index) = last_block;
_block_count = last_index;
_block_count.store_relaxed(last_index);
}
void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
assert(_block_count == 0, "array must be empty");
size_t count = from->_block_count;
assert(_block_count.load_relaxed() == 0, "array must be empty");
size_t count = from->_block_count.load_relaxed();
assert(count <= _size, "precondition");
Block* const* from_ptr = from->block_ptr(0);
Block** to_ptr = block_ptr(0);
@ -198,7 +198,7 @@ void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
assert(block->active_index() == i, "invariant");
*to_ptr++ = block;
}
_block_count = count;
_block_count.store_relaxed(count);
}
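The push() above, paired with block_count_acquire(), is the classic release/acquire publication idiom; a condensed sketch of the guarantee it provides:

    // Writer: complete all setup with plain stores, then publish.
    *block_ptr(index) = block;              // setup
    _block_count.release_store(index + 1);  // publish: setup cannot reorder below this

    // Reader: acquire the count; every slot below it is then fully initialized.
    size_t n = _block_count.load_acquire();
    // safe to dereference block_ptr(i) for all i < n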
// Blocks start with an array of BitsPerWord oop entries. That array
@ -230,14 +230,17 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
assert(is_aligned(this, block_alignment), "misaligned block");
}
#ifdef ASSERT
OopStorage::Block::~Block() {
assert(_release_refcount == 0, "deleting block while releasing");
assert(_deferred_updates_next == nullptr, "deleting block with deferred update");
assert(_release_refcount.load_relaxed() == 0, "deleting block while releasing");
assert(_deferred_updates_next.load_relaxed() == nullptr, "deleting block with deferred update");
// Clear fields used by block_for_ptr and entry validation, which
// might help catch bugs. Volatile to prevent dead-store elimination.
const_cast<uintx volatile&>(_allocated_bitmask) = 0;
// might help catch bugs.
_allocated_bitmask.store_relaxed(0);
// Volatile to prevent dead-store elimination.
const_cast<intptr_t volatile&>(_owner_address) = 0;
}
#endif // ASSERT
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
@ -272,16 +275,16 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
bool OopStorage::Block::is_safe_to_delete() const {
assert(is_empty(), "precondition");
OrderAccess::loadload();
return (AtomicAccess::load_acquire(&_release_refcount) == 0) &&
(AtomicAccess::load_acquire(&_deferred_updates_next) == nullptr);
return ((_release_refcount.load_acquire() == 0) &&
(_deferred_updates_next.load_acquire() == nullptr));
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
return _deferred_updates_next;
return _deferred_updates_next.load_relaxed();
}
void OopStorage::Block::set_deferred_updates_next(Block* block) {
_deferred_updates_next = block;
_deferred_updates_next.store_relaxed(block);
}
bool OopStorage::Block::contains(const oop* ptr) const {
@ -321,9 +324,8 @@ void OopStorage::Block::atomic_add_allocated(uintx add) {
// we can use an atomic add to implement the operation. The assert post
// facto verifies the precondition held; if there were any set bits in
// common, then after the add at least one of them will be zero.
uintx sum = AtomicAccess::add(&_allocated_bitmask, add);
assert((sum & add) == add, "some already present: %zu:%zu",
sum, add);
uintx sum = _allocated_bitmask.add_then_fetch(add);
assert((sum & add) == add, "some already present: %zu:%zu", sum, add);
}
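A small worked case of the disjoint-bits argument above: with _allocated_bitmask = 0b0011 and add = 0b0100, the atomic add yields sum = 0b0111 and (sum & add) == add holds. Had a bit overlapped, say add = 0b0010, the carry turns 0b0011 + 0b0010 into 0b0101; then sum & add == 0, so the post-facto assert catches the violated precondition.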
oop* OopStorage::Block::allocate() {
@ -452,7 +454,7 @@ oop* OopStorage::allocate() {
oop* result = block->allocate();
assert(result != nullptr, "allocation failed");
assert(!block->is_empty(), "postcondition");
AtomicAccess::inc(&_allocation_count); // release updates outside lock.
_allocation_count.add_then_fetch(1u); // release updates outside lock.
if (block->is_full()) {
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
@ -490,7 +492,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(!is_empty_bitmask(taken), "invariant");
} // Drop lock, now that we've taken all available entries from block.
size_t num_taken = population_count(taken);
AtomicAccess::add(&_allocation_count, num_taken);
_allocation_count.add_then_fetch(num_taken);
// Fill ptrs from those taken entries.
size_t limit = MIN2(num_taken, size);
for (size_t i = 0; i < limit; ++i) {
@ -506,7 +508,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(size == limit, "invariant");
assert(num_taken == (limit + population_count(taken)), "invariant");
block->release_entries(taken, this);
AtomicAccess::sub(&_allocation_count, num_taken - limit);
_allocation_count.sub_then_fetch(num_taken - limit);
}
log_trace(oopstorage, ref)("%s: bulk allocate %zu, returned %zu",
name(), limit, num_taken - limit);
@ -527,9 +529,9 @@ bool OopStorage::try_add_block() {
if (block == nullptr) return false;
// Add new block to the _active_array, growing if needed.
if (!_active_array->push(block)) {
if (!_active_array.load_relaxed()->push(block)) {
if (expand_active_array()) {
guarantee(_active_array->push(block), "push failed after expansion");
guarantee(_active_array.load_relaxed()->push(block), "push failed after expansion");
} else {
log_debug(oopstorage, blocks)("%s: failed active array expand", name());
Block::delete_block(*block);
@ -576,7 +578,7 @@ OopStorage::Block* OopStorage::block_for_allocation() {
// indicate allocation failure.
bool OopStorage::expand_active_array() {
assert_lock_strong(_allocation_mutex);
ActiveArray* old_array = _active_array;
ActiveArray* old_array = _active_array.load_relaxed();
size_t new_size = 2 * old_array->size();
log_debug(oopstorage, blocks)("%s: expand active array %zu",
name(), new_size);
@ -599,7 +601,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// Update new_array refcount to account for the new reference.
new_array->increment_refcount();
// Install new_array, ensuring its initialization is complete first.
AtomicAccess::release_store(&_active_array, new_array);
_active_array.release_store(new_array);
// Wait for any readers that could read the old array from _active_array.
// Can't use GlobalCounter here, because this is called from allocate(),
// which may be called in the scope of a GlobalCounter critical section
@ -617,7 +619,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
ActiveArray* result = AtomicAccess::load_acquire(&_active_array);
ActiveArray* result = _active_array.load_acquire();
result->increment_refcount();
return result;
}
@ -625,7 +627,7 @@ OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
// Decrement refcount of array and destroy if refcount is zero.
void OopStorage::relinquish_block_array(ActiveArray* array) const {
if (array->decrement_refcount()) {
assert(array != _active_array, "invariant");
assert(array != _active_array.load_relaxed(), "invariant");
ActiveArray::destroy(array);
}
}
@ -672,14 +674,14 @@ static void log_release_transitions(uintx releasing,
void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
assert(releasing != 0, "precondition");
// Prevent empty block deletion when transitioning to empty.
AtomicAccess::inc(&_release_refcount);
_release_refcount.add_then_fetch(1u);
// Atomically update allocated bitmask.
uintx old_allocated = _allocated_bitmask;
uintx old_allocated = _allocated_bitmask.load_relaxed();
while (true) {
assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = old_allocated ^ releasing;
uintx fetched = AtomicAccess::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
uintx fetched = _allocated_bitmask.compare_exchange(old_allocated, new_value);
if (fetched == old_allocated) break; // Successful update.
old_allocated = fetched; // Retry with updated bitmask.
}
@ -698,12 +700,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
if (AtomicAccess::replace_if_null(&_deferred_updates_next, this)) {
if (_deferred_updates_next.compare_exchange(nullptr, this) == nullptr) {
// Successfully claimed. Push, with self-loop for end-of-list.
Block* head = owner->_deferred_updates;
Block* head = owner->_deferred_updates.load_relaxed();
while (true) {
_deferred_updates_next = (head == nullptr) ? this : head;
Block* fetched = AtomicAccess::cmpxchg(&owner->_deferred_updates, head, this);
_deferred_updates_next.store_relaxed((head == nullptr) ? this : head);
Block* fetched = owner->_deferred_updates.compare_exchange(head, this);
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
@ -720,7 +722,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
}
}
// Release hold on empty block deletion.
AtomicAccess::dec(&_release_refcount);
_release_refcount.sub_then_fetch(1u);
}
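The self-loop used here as the end-of-list marker is worth spelling out: a null _deferred_updates_next already encodes "not on the deferred-update list", so the final list element needs a distinct sentinel. A sketch of the encoding and its translation back on pop:

    // block->next == nullptr : block is not on the list
    // block->next == block   : block is the last list element (self-loop)
    // block->next == other   : interior element
    Block* tail = block->deferred_updates_next();
    if (block == tail) tail = nullptr;  // decode sentinel back to "list ends here"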
// Process one available deferred update. Returns true if one was processed.
@ -729,13 +731,13 @@ bool OopStorage::reduce_deferred_updates() {
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is with respect to pushes by release().
Block* block = AtomicAccess::load_acquire(&_deferred_updates);
Block* block = _deferred_updates.load_acquire();
while (true) {
if (block == nullptr) return false;
// Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
if (block == tail) tail = nullptr; // Handle self-loop end marker.
Block* fetched = AtomicAccess::cmpxchg(&_deferred_updates, block, tail);
Block* fetched = _deferred_updates.compare_exchange(block, tail);
if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
}
@ -780,7 +782,7 @@ void OopStorage::release(const oop* ptr) {
assert(block != nullptr, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
log_trace(oopstorage, ref)("%s: releasing " PTR_FORMAT, name(), p2i(ptr));
block->release_entries(block->bitmask_for_entry(ptr), this);
AtomicAccess::dec(&_allocation_count);
_allocation_count.sub_then_fetch(1u);
}
void OopStorage::release(const oop* const* ptrs, size_t size) {
@ -806,7 +808,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
}
// Release the contiguous entries that are in block.
block->release_entries(releasing, this);
AtomicAccess::sub(&_allocation_count, count);
_allocation_count.sub_then_fetch(count);
}
}
@ -837,7 +839,7 @@ OopStorage::OopStorage(const char* name, MemTag mem_tag) :
_mem_tag(mem_tag),
_needs_cleanup(false)
{
_active_array->increment_refcount();
_active_array.load_relaxed()->increment_refcount();
assert(_active_mutex->rank() < _allocation_mutex->rank(),
"%s: active_mutex must have lower rank than allocation_mutex", _name);
assert(Service_lock->rank() < _active_mutex->rank(),
@ -852,20 +854,21 @@ void OopStorage::delete_empty_block(const Block& block) {
OopStorage::~OopStorage() {
Block* block;
while ((block = _deferred_updates) != nullptr) {
_deferred_updates = block->deferred_updates_next();
while ((block = _deferred_updates.load_relaxed()) != nullptr) {
_deferred_updates.store_relaxed(block->deferred_updates_next());
block->set_deferred_updates_next(nullptr);
}
while ((block = _allocation_list.head()) != nullptr) {
_allocation_list.unlink(*block);
}
bool unreferenced = _active_array->decrement_refcount();
ActiveArray* array = _active_array.load_relaxed();
bool unreferenced = array->decrement_refcount();
assert(unreferenced, "deleting storage while _active_array is referenced");
for (size_t i = _active_array->block_count(); 0 < i; ) {
block = _active_array->at(--i);
for (size_t i = array->block_count(); 0 < i; ) {
block = array->at(--i);
Block::delete_block(*block);
}
ActiveArray::destroy(_active_array);
ActiveArray::destroy(array);
os::free(const_cast<char*>(_name));
}
@ -894,7 +897,7 @@ bool OopStorage::should_report_num_dead() const {
// face of frequent explicit ServiceThread wakeups, hence the defer period.
// Global cleanup request state.
static volatile bool needs_cleanup_requested = false;
static Atomic<bool> needs_cleanup_requested{false};
// Time after which a cleanup is permitted.
static jlong cleanup_permit_time = 0;
@ -906,12 +909,11 @@ const jlong cleanup_defer_period = 500 * NANOSECS_PER_MILLISEC;
bool OopStorage::has_cleanup_work_and_reset() {
assert_lock_strong(Service_lock);
if (AtomicAccess::load_acquire(&needs_cleanup_requested) &&
os::javaTimeNanos() > cleanup_permit_time) {
cleanup_permit_time =
os::javaTimeNanos() + cleanup_defer_period;
if (needs_cleanup_requested.load_acquire() &&
(os::javaTimeNanos() > cleanup_permit_time)) {
cleanup_permit_time = os::javaTimeNanos() + cleanup_defer_period;
// Set the request flag false and return its old value.
AtomicAccess::release_store(&needs_cleanup_requested, false);
needs_cleanup_requested.release_store(false);
return true;
} else {
return false;
@ -923,22 +925,22 @@ bool OopStorage::has_cleanup_work_and_reset() {
void OopStorage::record_needs_cleanup() {
// Set local flag first, else ServiceThread could wake up and miss
// the request.
AtomicAccess::release_store(&_needs_cleanup, true);
AtomicAccess::release_store_fence(&needs_cleanup_requested, true);
_needs_cleanup.release_store(true);
needs_cleanup_requested.release_store_fence(true);
}
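The two stores are ordered this way because the consumer reads them in the opposite order: has_cleanup_work_and_reset() first acquires the global needs_cleanup_requested, and delete_empty_blocks() then acquires the per-storage _needs_cleanup. Publishing the local flag first (release) and the global flag last (release plus fence) means a ServiceThread wakeup triggered by the global flag can never observe the local flag still unset for the storage that requested cleanup.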
bool OopStorage::delete_empty_blocks() {
// ServiceThread might have oopstorage work, but not for this object.
// But check for deferred updates, which might provide cleanup work.
if (!AtomicAccess::load_acquire(&_needs_cleanup) &&
(AtomicAccess::load_acquire(&_deferred_updates) == nullptr)) {
if (!_needs_cleanup.load_acquire() &&
(_deferred_updates.load_acquire() == nullptr)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
AtomicAccess::release_store_fence(&_needs_cleanup, false);
_needs_cleanup.release_store_fence(false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
@ -977,7 +979,7 @@ bool OopStorage::delete_empty_blocks() {
// but don't re-notify, to avoid useless spinning of the
// ServiceThread. Instead, iteration completion notifies.
if (_concurrent_iteration_count > 0) return true;
_active_array->remove(block);
_active_array.load_relaxed()->remove(block);
}
// Remove block from _allocation_list and delete it.
_allocation_list.unlink(*block);
@ -1001,8 +1003,9 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Block could be a false positive, so get index carefully.
size_t index = Block::active_index_safe(block);
if ((index < _active_array->block_count()) &&
(block == _active_array->at(index)) &&
ActiveArray* array = _active_array.load_relaxed();
if ((index < array->block_count()) &&
(block == array->at(index)) &&
block->contains(ptr)) {
if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
@ -1015,7 +1018,7 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
}
size_t OopStorage::allocation_count() const {
return _allocation_count;
return _allocation_count.load_relaxed();
}
size_t OopStorage::block_count() const {
@ -1084,7 +1087,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
data->_processed += data->_segment_end - data->_segment_start;
size_t start = AtomicAccess::load_acquire(&_next_block);
size_t start = _next_block.load_acquire();
if (start >= _block_count) {
return finish_iteration(data); // No more blocks available.
}
@ -1097,11 +1100,11 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
size_t max_step = 10;
size_t remaining = _block_count - start;
size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
// AtomicAccess::add with possible overshoot. This can perform better
// Atomic add with possible overshoot. This can perform better
// than a CAS loop on some platforms when there is contention.
// We can cope with the uncertainty by recomputing start/end from
// the result of the add, and dealing with potential overshoot.
size_t end = AtomicAccess::add(&_next_block, step);
size_t end = _next_block.add_then_fetch(step);
// _next_block may have changed, so recompute start from result of add.
start = end - step;
// _next_block may have changed so much that end has overshot.
@ -1128,15 +1131,15 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
}
size_t OopStorage::BasicParState::num_dead() const {
return AtomicAccess::load(&_num_dead);
return _num_dead.load_relaxed();
}
void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
AtomicAccess::add(&_num_dead, num_dead);
_num_dead.add_then_fetch(num_dead);
}
void OopStorage::BasicParState::report_num_dead() const {
_storage->report_num_dead(AtomicAccess::load(&_num_dead));
_storage->report_num_dead(_num_dead.load_relaxed());
}
const char* OopStorage::name() const { return _name; }
@ -1164,8 +1167,8 @@ bool OopStorage::Block::print_containing(const oop* addr, outputStream* st) {
#ifndef PRODUCT
void OopStorage::print_on(outputStream* st) const {
size_t allocations = _allocation_count;
size_t blocks = _active_array->block_count();
size_t allocations = _allocation_count.load_relaxed();
size_t blocks = _active_array.load_relaxed()->block_count();
double data_size = section_size * section_count;
double alloc_percentage = percent_of((double)allocations, blocks * data_size);

View File

@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
@ -258,15 +259,15 @@ private:
private:
const char* _name;
ActiveArray* _active_array;
Atomic<ActiveArray*> _active_array;
AllocationList _allocation_list;
Block* volatile _deferred_updates;
Atomic<Block*> _deferred_updates;
Mutex* _allocation_mutex;
Mutex* _active_mutex;
NumDeadCallback _num_dead_callback;
// Volatile for racy unlocked accesses.
volatile size_t _allocation_count;
// Atomic for racy unlocked accesses.
Atomic<size_t> _allocation_count;
// Protection for _active_array.
mutable SingleWriterSynchronizer _protect_active;
@ -278,7 +279,7 @@ private:
MemTag _mem_tag;
// Flag indicating this storage object is a candidate for empty block deletion.
volatile bool _needs_cleanup;
Atomic<bool> _needs_cleanup;
// Clients construct via "create" factory function.
OopStorage(const char* name, MemTag mem_tag);

View File

@ -30,6 +30,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
@ -42,8 +43,8 @@ class OopStorage::ActiveArray {
friend class OopStorage::TestAccess;
size_t _size;
volatile size_t _block_count;
mutable volatile int _refcount;
Atomic<size_t> _block_count;
mutable Atomic<int> _refcount;
// Block* _blocks[1]; // Pseudo flexible array member.
ActiveArray(size_t size);
@ -104,7 +105,7 @@ inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
}
inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
assert(index < _block_count, "precondition");
assert(index < _block_count.load_relaxed(), "precondition");
return *block_ptr(index);
}
@ -135,16 +136,16 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
oop _data[BitsPerWord];
static const unsigned _data_pos = 0; // Position of _data.
volatile uintx _allocated_bitmask; // One bit per _data element.
Atomic<uintx> _allocated_bitmask; // One bit per _data element.
intptr_t _owner_address;
void* _memory; // Unaligned storage containing block.
size_t _active_index;
AllocationListEntry _allocation_list_entry;
Block* volatile _deferred_updates_next;
volatile uintx _release_refcount;
Atomic<Block*> _deferred_updates_next;
Atomic<uintx> _release_refcount;
Block(const OopStorage* owner, void* memory);
~Block();
~Block() NOT_DEBUG(= default);
void check_index(unsigned index) const;
unsigned get_index(const oop* ptr) const;
@ -322,7 +323,7 @@ inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
}
inline uintx OopStorage::Block::allocated_bitmask() const {
return _allocated_bitmask;
return _allocated_bitmask.load_relaxed();
}
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
@ -366,7 +367,7 @@ inline bool OopStorage::iterate_impl(F f, Storage* storage) {
// Propagate const/non-const iteration to the block layer, by using
// const or non-const blocks as corresponding to Storage.
using BlockPtr = std::conditional_t<std::is_const<Storage>::value, const Block*, Block*>;
ActiveArray* blocks = storage->_active_array;
ActiveArray* blocks = storage->_active_array.load_relaxed();
size_t limit = blocks->block_count();
for (size_t i = 0; i < limit; ++i) {
BlockPtr block = blocks->at(i);

View File

@ -27,6 +27,7 @@
#include "cppstdlib/type_traits.hpp"
#include "gc/shared/oopStorage.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
//////////////////////////////////////////////////////////////////////////////
@ -131,10 +132,10 @@ class OopStorage::BasicParState {
const OopStorage* _storage;
ActiveArray* _active_array;
size_t _block_count;
volatile size_t _next_block;
Atomic<size_t> _next_block;
uint _estimated_thread_count;
bool _concurrent;
volatile size_t _num_dead;
Atomic<size_t> _num_dead;
NONCOPYABLE(BasicParState);

View File

@ -22,6 +22,7 @@
*
*/
#include "cppstdlib/new.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
@ -33,8 +34,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include <new>
PartialArrayState::PartialArrayState(oop src, oop dst,
size_t index, size_t length,
size_t initial_refcount)

View File

@ -198,10 +198,8 @@ void StringDedup::Processor::run(JavaThread* thread) {
void StringDedup::Processor::log_statistics() {
_total_stat.add(&_cur_stat);
Stat::log_summary(&_cur_stat, &_total_stat);
if (log_is_enabled(Debug, stringdedup)) {
_cur_stat.log_statistics(false);
_total_stat.log_statistics(true);
Table::log_statistics();
}
_cur_stat.emit_statistics(false /* total */);
_total_stat.emit_statistics(true /* total */);
Table::log_statistics();
_cur_stat = Stat{};
}

View File

@ -23,6 +23,7 @@
*/
#include "gc/shared/stringdedup/stringDedupStat.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "utilities/globalDefinitions.hpp"
@ -91,13 +92,6 @@ static double strdedup_elapsed_param_ms(Tickspan t) {
}
void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_stat) {
double total_deduped_bytes_percent = 0.0;
if (total_stat->_new_bytes > 0) {
// Avoid division by zero
total_deduped_bytes_percent = percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes);
}
log_info(stringdedup)(
"Concurrent String Deduplication "
"%zu/" STRDEDUP_BYTES_FORMAT_NS " (new), "
@ -106,7 +100,7 @@ void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_sta
STRDEDUP_ELAPSED_FORMAT_MS " of " STRDEDUP_ELAPSED_FORMAT_MS,
last_stat->_new, STRDEDUP_BYTES_PARAM(last_stat->_new_bytes),
last_stat->_deduped, STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes),
total_deduped_bytes_percent,
percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes),
strdedup_elapsed_param_ms(last_stat->_process_elapsed),
strdedup_elapsed_param_ms(last_stat->_active_elapsed));
}
@ -208,7 +202,7 @@ void StringDedup::Stat::log_times(const char* prefix) const {
}
}
void StringDedup::Stat::log_statistics(bool total) const {
void StringDedup::Stat::log_statistics() const {
double known_percent = percent_of(_known, _inspected);
double known_shared_percent = percent_of(_known_shared, _inspected);
double new_percent = percent_of(_new, _inspected);
@ -216,7 +210,6 @@ void StringDedup::Stat::log_statistics(bool total) const {
double deduped_bytes_percent = percent_of(_deduped_bytes, _new_bytes);
double replaced_percent = percent_of(_replaced, _new);
double deleted_percent = percent_of(_deleted, _new);
log_times(total ? "Total" : "Last");
log_debug(stringdedup)(" Inspected: %12zu", _inspected);
log_debug(stringdedup)(" Known: %12zu(%5.1f%%)", _known, known_percent);
log_debug(stringdedup)(" Shared: %12zu(%5.1f%%)", _known_shared, known_shared_percent);
@ -229,3 +222,40 @@ void StringDedup::Stat::log_statistics(bool total) const {
log_debug(stringdedup)(" Skipped: %zu (dead), %zu (incomplete), %zu (shared)",
_skipped_dead, _skipped_incomplete, _skipped_shared);
}
void StringDedup::Stat::emit_statistics(bool total) const {
if (log_is_enabled(Debug, stringdedup)) {
log_times(total ? "Total" : "Last");
log_statistics();
}
if (total) {
// JFR events are only sent for the last-interval stats, not for totals
return;
}
EventStringDeduplication e;
if (e.should_commit()) {
e.set_starttime(_active_start);
Ticks active_end = _active_start;
active_end += _active_elapsed;
e.set_endtime(active_end);
e.set_inspected(_inspected);
e.set_known(_known);
e.set_shared(_known_shared);
e.set_newStrings(_new);
e.set_newSize(_new_bytes);
e.set_replaced(_replaced);
e.set_deleted(_deleted);
e.set_deduplicated(_deduped);
e.set_deduplicatedSize(_deduped_bytes);
e.set_skippedDead(_skipped_dead);
e.set_skippedIncomplete(_skipped_incomplete);
e.set_skippedShared(_skipped_shared);
e.set_processing(_process_elapsed);
e.set_tableResize(_resize_table_elapsed);
e.set_tableCleanup(_cleanup_table_elapsed);
e.commit();
}
}

View File

@ -71,6 +71,7 @@ private:
void report_phase_end(const char* phase, Tickspan* elapsed);
void log_times(const char* prefix) const;
void log_statistics() const;
public:
Stat();
@ -148,7 +149,7 @@ public:
void report_active_end();
void add(const Stat* const stat);
void log_statistics(bool total) const;
void emit_statistics(bool total) const;
static void log_summary(const Stat* last_stat, const Stat* total_stat);
};

View File

@ -730,6 +730,10 @@ void StringDedup::Table::verify() {
}
void StringDedup::Table::log_statistics() {
if (!log_is_enabled(Debug, stringdedup)) {
return;
}
size_t dead_count;
int dead_state;
{

View File

@ -37,7 +37,6 @@
#include "utilities/copy.hpp"
size_t ThreadLocalAllocBuffer::_max_size = 0;
int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
unsigned int ThreadLocalAllocBuffer::_target_refills = 0;
ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :
@ -225,30 +224,6 @@ void ThreadLocalAllocBuffer::startup_initialization() {
// abort during VM initialization.
_target_refills = MAX2(_target_refills, 2U);
#ifdef COMPILER2
// If the C2 compiler is present, extra space is needed at the end of
// TLABs, otherwise prefetching instructions generated by the C2
// compiler will fault (due to accessing memory outside of heap).
// The amount of space is the max of the number of lines to
// prefetch for array and for instance allocations. (Extra space must be
// reserved to accommodate both types of allocations.)
//
// Only SPARC-specific BIS instructions are known to fault. (Those
// instructions are generated if AllocatePrefetchStyle==3 and
// AllocatePrefetchInstr==1). To be on the safe side, however,
// extra space is reserved for all combinations of
// AllocatePrefetchStyle and AllocatePrefetchInstr.
//
// If the C2 compiler is not present, no space is reserved.
// +1 for rounding up to next cache line, +1 to be safe
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
_reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
(int)HeapWordSize;
}
#endif
// During jvm startup, the main thread is initialized
// before the heap is initialized. So reinitialize it now.
guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
@ -454,8 +429,7 @@ void ThreadLocalAllocStats::publish() {
}
size_t ThreadLocalAllocBuffer::end_reserve() {
size_t reserve_size = CollectedHeap::lab_alignment_reserve();
return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
return CollectedHeap::lab_alignment_reserve();
}
const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const {

View File

@ -58,7 +58,6 @@ private:
size_t _allocated_before_last_gc; // total bytes allocated up until the last gc
static size_t _max_size; // maximum size of any TLAB
static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
static unsigned _target_refills; // expected number of refills between GCs
unsigned _number_of_refills;

View File

@ -106,9 +106,6 @@ private:
size_t _used[UIntNumPartitions];
size_t _available[UIntNumPartitions];
// Measured in bytes.
size_t _allocated_since_gc_start[UIntNumPartitions];
// Some notes:
// total_region_counts[p] is _capacity[p] / region_size_bytes
// retired_regions[p] is total_region_counts[p] - _region_counts[p]

View File

@ -378,24 +378,20 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
// evacuation phase) of young collections. This is never called
// during global collections during marking or update refs.
// 4. Every allocation under TAMS updates the object start array.
#ifdef ASSERT
oop obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
#ifdef ASSERT
#define WALK_FORWARD_IN_BLOCK_START true
#else
#define WALK_FORWARD_IN_BLOCK_START false
#endif // ASSERT
while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
while (p + obj->size() < left) {
p += obj->size();
obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
// Check assumptions in previous block comment if this assert fires
guarantee(false, "Should never need forward walk in block start");
fatal("Should never need forward walk in block start");
}
#undef WALK_FORWARD_IN_BLOCK_START
assert(p <= left, "p should start at or before left end of card");
assert(p + obj->size() > left, "obj should end after left end of card");
#endif // ASSERT
return p;
}

View File

@ -27,10 +27,9 @@
#include "gc/z/zDeferredConstructed.hpp"
#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include <new>
template <typename T>
inline ZDeferredConstructed<T>::ZDeferredConstructed()
DEBUG_ONLY(: _initialized(false)) {

View File

@ -53,7 +53,8 @@ enum {
JMM_VERSION_2 = 0x20020000, // JDK 10
JMM_VERSION_3 = 0x20030000, // JDK 14
JMM_VERSION_4 = 0x20040000, // JDK 21
JMM_VERSION = JMM_VERSION_4
JMM_VERSION_5 = 0x20050000, // JDK 26
JMM_VERSION = JMM_VERSION_5
};
typedef struct {
@ -81,6 +82,7 @@ typedef enum {
JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */
JMM_GC_COUNT = 10, /* Total number of collections */
JMM_JVM_UPTIME_MS = 11, /* The JVM uptime in milliseconds */
JMM_TOTAL_GC_CPU_TIME = 12, /* Total accumulated GC CPU time */
JMM_INTERNAL_ATTRIBUTE_INDEX = 100,
JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */

View File

@ -421,7 +421,9 @@ JVM_END
JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
#ifdef LINUX
// We want the host swap memory, not the container value.
return os::Linux::host_swap();
physical_memory_size_type host_swap = 0;
(void)os::Linux::host_swap(host_swap); // Ignore failure; host_swap stays 0 (no swap)
return static_cast<jlong>(host_swap);
#else
physical_memory_size_type total_swap_space = 0;
// Return value ignored - defaulting to 0 on failure.

View File

@ -1283,7 +1283,25 @@
<Field type="ulong" name="regionsImmediate" label="Regions Immediate" />
<Field type="ulong" contentType="bytes" name="immediateBytes" label="Immediate Bytes" />
</Event>
<Event name="StringDeduplication" category="Java Virtual Machine, GC, Detailed" label="String Deduplication Statistics" stackTrace="false">
<Field type="ulong" name="inspected" label="Inspected" />
<Field type="ulong" name="known" label="Known" />
<Field type="ulong" name="shared" label="Shared" />
<Field type="ulong" name="newStrings" label="New Strings" />
<Field type="ulong" name="newSize" contentType="bytes" label="New Size" />
<Field type="ulong" name="replaced" label="Replaced" />
<Field type="ulong" name="deleted" label="Deleted" />
<Field type="ulong" name="deduplicated" label="Deduplicated" />
<Field type="ulong" name="deduplicatedSize" contentType="bytes" label="Deduplicated Size" />
<Field type="ulong" name="skippedDead" label="Skipped Dead" />
<Field type="ulong" name="skippedIncomplete" label="Skipped Incomplete" />
<Field type="ulong" name="skippedShared" label="Skipped Shared" />
<Field type="Tickspan" name="processing" label="Processing" />
<Field type="Tickspan" name="tableResize" label="Table Resize" />
<Field type="Tickspan" name="tableCleanup" label="Table Cleanup" />
</Event>
<Event name="Flush" category="Flight Recorder" label="Flush" thread="false" experimental="true">
<Field type="ulong" name="flushId" label="Flush Identifier" relation="FlushId" />
<Field type="ulong" name="elements" label="Elements Written" />

View File

@ -25,14 +25,13 @@
#ifndef SHARE_MEMORY_ALLOCATION_HPP
#define SHARE_MEMORY_ALLOCATION_HPP
#include "cppstdlib/new.hpp"
#include "memory/allStatic.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include <new>
class outputStream;
class Thread;
class JavaThread;

View File

@ -24,6 +24,7 @@
*/
#include "compiler/compilationMemoryStatistic.hpp"
#include "cppstdlib/new.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/resourceArea.hpp"

View File

@ -31,8 +31,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#include <new>
// The byte alignment to be used by Arena::Amalloc.
#define ARENA_AMALLOC_ALIGNMENT BytesPerLong
#define ARENA_ALIGN(x) (align_up((x), ARENA_AMALLOC_ALIGNMENT))

View File

@ -557,32 +557,32 @@ void Universe::genesis(TRAPS) {
void Universe::initialize_basic_type_mirrors(TRAPS) {
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_using_archive() &&
HeapShared::is_archived_heap_in_use() &&
_basic_type_mirrors[T_INT].resolve() != nullptr) {
// check that all basic type mirrors are mapped also
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
if (!is_reference_type((BasicType)i)) {
oop m = _basic_type_mirrors[i].resolve();
assert(m != nullptr, "archived mirrors should not be null");
}
if (CDSConfig::is_using_archive() &&
HeapShared::is_archived_heap_in_use() &&
_basic_type_mirrors[T_INT].resolve() != nullptr) {
// check that all basic type mirrors are mapped also
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
if (!is_reference_type((BasicType)i)) {
oop m = _basic_type_mirrors[i].resolve();
assert(m != nullptr, "archived mirrors should not be null");
}
} else
// _basic_type_mirrors[T_INT], etc, are null if not using an archived heap
}
} else
// _basic_type_mirrors[T_INT], etc, are null if not using an archived heap
#endif
{
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
_basic_type_mirrors[i] = OopHandle(vm_global(), m);
}
CDS_JAVA_HEAP_ONLY(_archived_basic_type_mirror_indices[i] = -1);
{
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
_basic_type_mirrors[i] = OopHandle(vm_global(), m);
}
CDS_JAVA_HEAP_ONLY(_archived_basic_type_mirror_indices[i] = -1);
}
if (CDSConfig::is_dumping_heap()) {
HeapShared::init_scratch_objects_for_basic_type_mirrors(CHECK);
}
}
if (CDSConfig::is_dumping_heap()) {
HeapShared::init_scratch_objects_for_basic_type_mirrors(CHECK);
}
}
void Universe::fixup_mirrors(TRAPS) {

View File

@ -45,7 +45,7 @@
#include "utilities/ostream.hpp"
#include "utilities/vmError.hpp"
MallocMemorySnapshot MallocMemorySummary::_snapshot;
DeferredStatic<MallocMemorySnapshot> MallocMemorySummary::_snapshot;
void MemoryCounter::update_peak(size_t size, size_t cnt) {
size_t peak_sz = peak_size();
@ -101,7 +101,7 @@ void MallocMemorySnapshot::make_adjustment() {
}
void MallocMemorySummary::initialize() {
// Uses placement new operator to initialize static area.
_snapshot.initialize();
MallocLimitHandler::initialize(MallocLimit);
}

View File

@ -30,6 +30,7 @@
#include "nmt/memTag.hpp"
#include "nmt/nmtCommon.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/deferredStatic.hpp"
#include "utilities/nativeCallStack.hpp"
class outputStream;
@ -204,7 +205,7 @@ class MallocMemorySnapshot {
class MallocMemorySummary : AllStatic {
private:
// Reserve memory for placement of MallocMemorySnapshot object
static MallocMemorySnapshot _snapshot;
static DeferredStatic<MallocMemorySnapshot> _snapshot;
static bool _have_limits;
// Called when a total limit break was detected.
@ -251,7 +252,7 @@ class MallocMemorySummary : AllStatic {
}
static MallocMemorySnapshot* as_snapshot() {
return &_snapshot;
return _snapshot.get();
}
// MallocLimit: returns true if allocating s bytes on f would trigger

View File

@ -32,7 +32,6 @@
#include "memory/universe.hpp"
#include "nmt/memMapPrinter.hpp"
#include "nmt/memTag.hpp"
#include "nmt/memTagBitmap.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/nonJavaThread.hpp"
@ -40,6 +39,8 @@
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/permitForbiddenFunctions.hpp"
@ -128,8 +129,8 @@ public:
}
// Given a vma [from, to), find all regions that intersect with this vma and
// return their collective flags.
MemTagBitmap lookup(const void* from, const void* to) const {
// set their collective flags in bm.
void lookup(const void* from, const void* to, ResourceBitMap& bm) const {
assert(from <= to, "Sanity");
// We optimize for sequential lookups. Since this class is used when a list
// of OS mappings is scanned (VirtualQuery, /proc/pid/maps), and these lists
@ -138,16 +139,14 @@ public:
// the range is to the right of the given section, we need to re-start the search
_last = 0;
}
MemTagBitmap bm;
for(uintx i = _last; i < _count; i++) {
if (range_intersects(from, to, _ranges[i].from, _ranges[i].to)) {
bm.set_tag(_mem_tags[i]);
bm.set_bit((BitMap::idx_t)_mem_tags[i]);
} else if (to <= _ranges[i].from) {
_last = i;
break;
}
}
return bm;
}
bool do_allocation_site(const ReservedMemoryRegion* rgn) override {
@ -247,11 +246,13 @@ bool MappingPrintSession::print_nmt_info_for_region(const void* vma_from, const
// print NMT information, if available
if (MemTracker::enabled()) {
// Correlate vma region (from, to) with NMT region(s) we collected previously.
const MemTagBitmap flags = _nmt_info.lookup(vma_from, vma_to);
if (flags.has_any()) {
ResourceMark rm;
ResourceBitMap flags(mt_number_of_tags);
_nmt_info.lookup(vma_from, vma_to, flags);
if (!flags.is_empty()) {
for (int i = 0; i < mt_number_of_tags; i++) {
const MemTag mem_tag = (MemTag)i;
if (flags.has_tag(mem_tag)) {
if (flags.at((BitMap::idx_t)mem_tag)) {
if (num_printed > 0) {
_out->put(',');
}

View File

@ -1,56 +0,0 @@
/*
* Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_NMT_MEMTAGBITMAP_HPP
#define SHARE_NMT_MEMTAGBITMAP_HPP
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
class MemTagBitmap {
uint32_t _v;
STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_tags);
public:
MemTagBitmap(uint32_t v = 0) : _v(v) {}
MemTagBitmap(const MemTagBitmap& o) : _v(o._v) {}
uint32_t raw_value() const { return _v; }
void set_tag(MemTag mem_tag) {
const int bitno = (int)mem_tag;
_v |= nth_bit(bitno);
}
bool has_tag(MemTag mem_tag) const {
const int bitno = (int)mem_tag;
return _v & nth_bit(bitno);
}
bool has_any() const { return _v > 0; }
};
#endif // SHARE_NMT_MEMTAGBITMAP_HPP

View File

@ -2870,7 +2870,7 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
}
bool InstanceKlass::can_be_verified_at_dumptime() const {
if (AOTMetaspace::in_aot_cache(this)) {
if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(this)) {
// This is a class that was dumped into the base archive, so we know
// it was verified at dump time.
return true;

View File

@ -23,8 +23,17 @@
*/
#include "cds/archiveBuilder.hpp"
#include "cppstdlib/type_traits.hpp"
#include "oops/resolvedFieldEntry.hpp"
static_assert(std::is_trivially_copyable_v<ResolvedFieldEntry>);
// Detect inadvertently introduced trailing padding.
class ResolvedFieldEntryWithExtra : public ResolvedFieldEntry {
u1 _extra_field;
};
static_assert(sizeof(ResolvedFieldEntryWithExtra) > sizeof(ResolvedFieldEntry));
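Why the inheritance trick detects trailing padding (assuming a common ABI such as Itanium C++, where a non-POD base class's tail padding may be reused for derived-class members): if ResolvedFieldEntry ended in padding, the compiler could place _extra_field inside that padding, making the two sizeofs equal and failing the static_assert at compile time. With the explicit _padding field occupying the gap, the derived class is forced to grow.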
void ResolvedFieldEntry::print_on(outputStream* st) const {
st->print_cr("Field Entry:");
@ -45,9 +54,7 @@ void ResolvedFieldEntry::print_on(outputStream* st) const {
#if INCLUDE_CDS
void ResolvedFieldEntry::remove_unshareable_info() {
u2 saved_cpool_index = _cpool_index;
memset(this, 0, sizeof(*this));
_cpool_index = saved_cpool_index;
*this = ResolvedFieldEntry(_cpool_index);
}
void ResolvedFieldEntry::mark_and_relocate() {

View File

@ -43,6 +43,9 @@
// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.
// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
// the C++ compiler from potentially inserting random values in unused gaps.
//class InstanceKlass;
class ResolvedFieldEntry {
friend class VMStructs;
@ -54,17 +57,9 @@ class ResolvedFieldEntry {
u1 _tos_state; // TOS state
u1 _flags; // Flags: [0000|00|is_final|is_volatile]
u1 _get_code, _put_code; // Get and Put bytecodes of the field
void copy_from(const ResolvedFieldEntry& other) {
_field_holder = other._field_holder;
_field_offset = other._field_offset;
_field_index = other._field_index;
_cpool_index = other._cpool_index;
_tos_state = other._tos_state;
_flags = other._flags;
_get_code = other._get_code;
_put_code = other._put_code;
}
#ifdef _LP64
u4 _padding;
#endif
public:
ResolvedFieldEntry(u2 cpi) :
@ -75,48 +70,15 @@ public:
_tos_state(0),
_flags(0),
_get_code(0),
_put_code(0) {}
_put_code(0)
#ifdef _LP64
, _padding(0)
#endif
{}
ResolvedFieldEntry() :
ResolvedFieldEntry(0) {}
// Notes on copy constructor, copy assignment operator, and copy_from().
// These are necessary for generating deterministic CDS archives.
//
// We have some unused padding on 64-bit platforms (4 bytes at the tail end).
//
// When ResolvedFieldEntries in a ConstantPoolCache are allocated from the metaspace,
// their entire content (including the padding) is filled with zeros. They are
// then initialized with initialize_resolved_entries_array() in cpCache.cpp from a
// GrowableArray.
//
// The GrowableArray is initialized in rewriter.cpp, using ResolvedFieldEntries that
// are originally allocated from the C++ stack. Functions like GrowableArray::expand_to()
// will also allocate ResolvedFieldEntries from the stack. These may have random bits
// in the padding as the C++ compiler is allowed to leave the padding in uninitialized
// states.
//
// If we use the default copy constructor and/or default copy assignment operator,
// the random padding will be copied into the GrowableArray, from there
// to the ConstantPoolCache, and eventually to the CDS archive. As a result, the
// CDS archive will contain random bits, causing failures in
// test/hotspot/jtreg/runtime/cds/DeterministicDump.java (usually on Windows).
//
// By using copy_from(), we can prevent the random padding from being copied,
// ensuring that the ResolvedFieldEntries in a ConstantPoolCache (and thus the
// CDS archive) will have all zeros in the padding.
// Copy constructor
ResolvedFieldEntry(const ResolvedFieldEntry& other) {
copy_from(other);
}
// Copy assignment operator
ResolvedFieldEntry& operator=(const ResolvedFieldEntry& other) {
copy_from(other);
return *this;
}
// Bit shift to get flags
// Note: Only two flags exists at the moment but more could be added
enum {

View File

@ -23,9 +23,18 @@
*/
#include "cds/archiveBuilder.hpp"
#include "cppstdlib/type_traits.hpp"
#include "oops/method.hpp"
#include "oops/resolvedMethodEntry.hpp"
static_assert(std::is_trivially_copyable_v<ResolvedMethodEntry>);
// Detect inadvertently introduced trailing padding.
class ResolvedMethodEntryWithExtra : public ResolvedMethodEntry {
u1 _extra_field;
};
static_assert(sizeof(ResolvedMethodEntryWithExtra) > sizeof(ResolvedMethodEntry));
bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
// return false if m refers to a non-deleted old or obsolete method
if (_method != nullptr) {
@ -39,14 +48,10 @@ bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
void ResolvedMethodEntry::reset_entry() {
if (has_resolved_references_index()) {
u2 saved_resolved_references_index = _entry_specific._resolved_references_index;
u2 saved_cpool_index = _cpool_index;
memset(this, 0, sizeof(*this));
*this = ResolvedMethodEntry(_cpool_index);
set_resolved_references_index(saved_resolved_references_index);
_cpool_index = saved_cpool_index;
} else {
u2 saved_cpool_index = _cpool_index;
memset(this, 0, sizeof(*this));
_cpool_index = saved_cpool_index;
*this = ResolvedMethodEntry(_cpool_index);
}
}

View File

@ -61,6 +61,9 @@
// pool entry and thus the same resolved method entry.
// The is_vfinal flag indicates method pointer for a final method or an index.
// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
// the C++ compiler from potentially inserting random values in unused gaps.
class InstanceKlass;
class ResolvedMethodEntry {
friend class VMStructs;
@ -70,6 +73,7 @@ class ResolvedMethodEntry {
InstanceKlass* _interface_klass; // for interface and static
u2 _resolved_references_index; // Index of resolved references array that holds the appendix oop for invokehandle
u2 _table_index; // vtable/itable index for virtual and interface calls
// The padding field is unused here, as the parent constructor zeroes the union.
} _entry_specific;
u2 _cpool_index; // Constant pool index
@ -80,51 +84,36 @@ class ResolvedMethodEntry {
#ifdef ASSERT
bool _has_interface_klass;
bool _has_table_index;
# ifdef _LP64
u2 _padding1;
u4 _padding2;
# else
u1 _padding1;
u1 _padding2;
# endif
#endif
// See comments in resolvedFieldEntry.hpp about copy_from and padding.
// We have unused padding on debug builds.
void copy_from(const ResolvedMethodEntry& other) {
_method = other._method;
_entry_specific = other._entry_specific;
_cpool_index = other._cpool_index;
_number_of_parameters = other._number_of_parameters;
_tos_state = other._tos_state;
_flags = other._flags;
_bytecode1 = other._bytecode1;
_bytecode2 = other._bytecode2;
#ifdef ASSERT
_has_interface_klass = other._has_interface_klass;
_has_table_index = other._has_table_index;
#endif
}
// Constructors
public:
ResolvedMethodEntry(u2 cpi) :
_method(nullptr),
_entry_specific{nullptr},
_cpool_index(cpi),
_number_of_parameters(0),
_tos_state(0),
_flags(0),
_bytecode1(0),
_bytecode2(0) {
_entry_specific._interface_klass = nullptr;
DEBUG_ONLY(_has_interface_klass = false;)
DEBUG_ONLY(_has_table_index = false;)
}
_bytecode2(0)
#ifdef ASSERT
, _has_interface_klass(false),
_has_table_index(false),
_padding1(0),
_padding2(0)
#endif
{}
ResolvedMethodEntry() :
ResolvedMethodEntry(0) {}
ResolvedMethodEntry(const ResolvedMethodEntry& other) {
copy_from(other);
}
ResolvedMethodEntry& operator=(const ResolvedMethodEntry& other) {
copy_from(other);
return *this;
}
// Bit shift to get flags
enum {

View File

@ -28,8 +28,6 @@
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;
@ -779,15 +777,17 @@ bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues
return false;
}
// As an optimization, choose optimum vector size for copy length known at compile time.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
  int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
  if (const_len > 0) {
    int size_in_bytes = const_len * type2aelembytes(type);
    if (size_in_bytes <= 16)
      lane_count = 16/type2aelembytes(type);
    else if (size_in_bytes > 16 && size_in_bytes <= 32)
      lane_count = 32/type2aelembytes(type);
  }
  return lane_count;
}
// As an optimization, choose the optimal vector size for a bounded copy length.
int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, jlong max_len) {
  assert(max_len > 0, JLONG_FORMAT, max_len);
  // We only care whether max_size_in_bytes is larger than 32; we also want to
  // avoid multiplication overflow, so clamp max_len to [0, 64].
  int max_size_in_bytes = MIN2<jlong>(max_len, 64) * type2aelembytes(type);
  if (ArrayOperationPartialInlineSize > 16 && max_size_in_bytes <= 16) {
    return 16 / type2aelembytes(type);
  } else if (ArrayOperationPartialInlineSize > 32 && max_size_in_bytes <= 32) {
    return 32 / type2aelembytes(type);
  } else {
    return ArrayOperationPartialInlineSize / type2aelembytes(type);
  }
}
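A worked example of the selection logic above, restated as a standalone sketch (the HotSpot flag and the element size become explicit parameters here; the numbers are illustrative):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors get_partial_inline_vector_lane_count: pick the smallest vector
// width (16 or 32 bytes) that covers the bounded copy, else use the flag.
static int lane_count_for(int partial_inline_size, int elem_bytes, int64_t max_len) {
  int max_size_in_bytes = static_cast<int>(std::min<int64_t>(max_len, 64) * elem_bytes);
  if (partial_inline_size > 16 && max_size_in_bytes <= 16) return 16 / elem_bytes;
  if (partial_inline_size > 32 && max_size_in_bytes <= 32) return 32 / elem_bytes;
  return partial_inline_size / elem_bytes;
}

int main() {
  // Copying at most 5 ints (20 bytes) with a 64-byte partial-inline limit:
  // 20 bytes is > 16 but <= 32, so 32 bytes = 8 int lanes suffice.
  printf("%d\n", lane_count_for(64, 4, 5));  // prints 8
  return 0;
}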

View File

@ -191,7 +191,7 @@ public:
static bool may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac);
static int get_partial_inline_vector_lane_count(BasicType type, int const_len);
static int get_partial_inline_vector_lane_count(BasicType type, jlong max_len);
bool modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const;

View File

@ -428,7 +428,7 @@
"0=print nothing except PhasePrintLevel directives, " \
"6=all details printed. " \
"Level of detail of printouts can be set on a per-method level " \
"as well by using CompileCommand=PrintPhaseLevel.") \
"as well by using CompileCommand=PhasePrintLevel.") \
range(-1, 6) \
\
develop(bool, PrintIdealGraph, false, \

View File

@ -5233,7 +5233,7 @@ void Compile::end_method() {
#ifndef PRODUCT
bool Compile::should_print_phase(const int level) const {
return PrintPhaseLevel > 0 && directive()->PhasePrintLevelOption >= level &&
return PrintPhaseLevel >= 0 && directive()->PhasePrintLevelOption >= level &&
_method != nullptr; // Do not print phases for stubs.
}
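The switch from "> 0" to ">= 0" follows directly from the flag's help text above: a global level of 0 must still honor per-method PhasePrintLevel directives. A standalone restatement of the corrected predicate (the free function and parameter names are a sketch, not the HotSpot API):

// Sketch: print_phase_level stands for -XX:PrintPhaseLevel, and
// phase_print_level_option for a CompileCommand=PhasePrintLevel directive.
static bool should_print_phase_sketch(int print_phase_level,
                                      int phase_print_level_option,
                                      int level, bool has_method) {
  // With "> 0", a global level of 0 would silently disable per-method
  // directives, contradicting the documented behavior of the flag.
  return print_phase_level >= 0 && phase_print_level_option >= level && has_method;
}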

View File

@ -97,10 +97,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
ciMethod* orig_callee = caller->get_method_at_bci(bci);
const bool is_virtual_or_interface = (bytecode == Bytecodes::_invokevirtual) ||
(bytecode == Bytecodes::_invokeinterface) ||
(orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual) ||
(orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
const bool is_virtual = (bytecode == Bytecodes::_invokevirtual) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual);
const bool is_interface = (bytecode == Bytecodes::_invokeinterface) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
const bool is_virtual_or_interface = is_virtual || is_interface;
const bool check_access = !orig_callee->is_method_handle_intrinsic(); // method handle intrinsics don't perform access checks
@ -339,17 +338,25 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// number of implementors for decl_interface is 0 or 1. If
// it's 0 then no class implements decl_interface and there's
// no point in inlining.
if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
ciInstanceKlass* declared_interface =
caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
if (call_does_dispatch && is_interface) {
ciInstanceKlass* declared_interface = nullptr;
if (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface) {
// MemberName doesn't keep information about resolved interface class (REFC) once
// resolution is over, but the resolved method holder (DEFC) can be used as a
// conservative approximation.
declared_interface = callee->holder();
} else {
assert(!orig_callee->is_method_handle_intrinsic(), "not allowed");
declared_interface = caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
}
assert(declared_interface->is_interface(), "required");
ciInstanceKlass* singleton = declared_interface->unique_implementor();
if (singleton != nullptr) {
assert(singleton != declared_interface, "not a unique implementor");
assert(check_access, "required");
ciMethod* cha_monomorphic_target =
callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);
callee->find_monomorphic_target(caller->holder(), declared_interface, singleton, check_access);
if (cha_monomorphic_target != nullptr &&
cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
@ -372,7 +379,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
}
}
} // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
} // call_does_dispatch && is_interface
// Nothing claimed the intrinsic, we go with straight-forward inlining
// for already discovered intrinsic.
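The unique-implementor argument above, restated as a sketch (the type and field are hypothetical; in C2 the devirtualized call is additionally protected by a dependency that is invalidated if a second implementor is ever loaded):

struct Klass {
  Klass* unique_implementor;  // the single implementing class, or nullptr
};

// An interface call through decl_interface can be bound directly to the one
// class that implements it; with zero or many implementors it stays virtual.
static Klass* monomorphic_receiver(const Klass* decl_interface) {
  return decl_interface->unique_implementor;
}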

View File

@ -1914,8 +1914,7 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
transform_later(cache_adr);
cache_adr = new CastP2XNode(needgc_false, cache_adr);
transform_later(cache_adr);
// Address is aligned to execute prefetch to the beginning of cache line size
// (it is important when BIS instruction is used on SPARC as prefetch).
// Address is aligned to execute prefetch to the beginning of cache line size.
Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
cache_adr = new AndXNode(cache_adr, mask);
transform_later(cache_adr);

View File

@ -204,53 +204,46 @@ void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* sub
void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type,
RegionNode** exit_block, Node** result_memory, Node* length,
Node* src_start, Node* dst_start, BasicType type) {
const TypePtr *src_adr_type = _igvn.type(src_start)->isa_ptr();
Node* inline_block = nullptr;
Node* stub_block = nullptr;
int inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(type);
int const_len = -1;
const TypeInt* lty = nullptr;
uint shift = exact_log2(type2aelembytes(type));
if (length->Opcode() == Op_ConvI2L) {
lty = _igvn.type(length->in(1))->isa_int();
} else {
lty = _igvn.type(length)->isa_int();
}
if (lty && lty->is_con()) {
const_len = lty->get_con() << shift;
const TypeLong* length_type = _igvn.type(length)->isa_long();
if (length_type == nullptr) {
assert(_igvn.type(length) == Type::TOP, "");
return;
}
// Return if copy length is greater than partial inline size limit or
// target does not support masked load/stores.
int lane_count = ArrayCopyNode::get_partial_inline_vector_lane_count(type, const_len);
if ( const_len > ArrayOperationPartialInlineSize ||
!Matcher::match_rule_supported_vector(Op_LoadVectorMasked, lane_count, type) ||
const TypeLong* inline_range = TypeLong::make(0, inline_limit, Type::WidenMin);
if (length_type->join(inline_range) == Type::TOP) {
// The ranges do not intersect, the inline check will surely fail
return;
}
// Return if the target does not support masked load/stores.
int lane_count = ArrayCopyNode::get_partial_inline_vector_lane_count(type, length_type->_hi);
if (!Matcher::match_rule_supported_vector(Op_LoadVectorMasked, lane_count, type) ||
!Matcher::match_rule_supported_vector(Op_StoreVectorMasked, lane_count, type) ||
!Matcher::match_rule_supported_vector(Op_VectorMaskGen, lane_count, type)) {
return;
}
int inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(type);
Node* casted_length = new CastLLNode(*ctrl, length, TypeLong::make(0, inline_limit, Type::WidenMin));
transform_later(casted_length);
Node* copy_bytes = new LShiftXNode(length, intcon(shift));
transform_later(copy_bytes);
Node* cmp_le = new CmpULNode(copy_bytes, longcon(ArrayOperationPartialInlineSize));
Node* cmp_le = new CmpULNode(length, longcon(inline_limit));
transform_later(cmp_le);
Node* bol_le = new BoolNode(cmp_le, BoolTest::le);
transform_later(bol_le);
inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
stub_block = *ctrl;
Node* inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
Node* stub_block = *ctrl;
Node* casted_length = new CastLLNode(inline_block, length, inline_range, ConstraintCastNode::RegularDependency);
transform_later(casted_length);
Node* mask_gen = VectorMaskGenNode::make(casted_length, type);
transform_later(mask_gen);
unsigned vec_size = lane_count * type2aelembytes(type);
if (C->max_vector_size() < vec_size) {
C->set_max_vector_size(vec_size);
}
const TypePtr* src_adr_type = _igvn.type(src_start)->isa_ptr();
const TypeVect * vt = TypeVect::make(type, lane_count);
Node* mm = (*mem)->memory_at(C->get_alias_index(src_adr_type));
Node* masked_load = new LoadVectorMaskedNode(inline_block, mm, src_start,
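The join-against-TOP test above is an interval-intersection check in disguise; a minimal sketch with explicit bounds (hypothetical helper, not C2 code):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// The copy length is known to lie in [lo, hi]; partial inlining only applies
// when that range can intersect [0, inline_limit]. An empty intersection is
// what the type join reports as TOP.
static bool can_partial_inline(int64_t lo, int64_t hi, int64_t inline_limit) {
  int64_t join_lo = std::max<int64_t>(lo, 0);
  int64_t join_hi = std::min(hi, inline_limit);
  return join_lo <= join_hi;  // non-empty intersection
}

int main() {
  // With room for 16 elements, a length known to be at least 100 never fits.
  printf("%d\n", can_partial_inline(100, INT64_MAX, 16));  // prints 0
  return 0;
}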

View File

@ -2012,11 +2012,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
Compile* C = phase->C;
// If we are loading from a freshly-allocated object, produce a zero,
// if the load is provably beyond the header of the object.
// (Also allow a variable load from a fresh array to produce zero.)
const TypeOopPtr* tinst = tp->isa_oopptr();
bool is_instance = (tinst != nullptr) && tinst->is_known_instance_field();
// If load can see a previous constant store, use that.
Node* value = can_see_stored_value(mem, phase);
if (value != nullptr && value->is_Con()) {
assert(value->bottom_type()->higher_equal(_type), "sanity");
@ -2227,13 +2223,16 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
}
}
bool is_vect = (_type->isa_vect() != nullptr);
if (is_instance && !is_vect) {
// If we have an instance type and our memory input is the
// programs's initial memory state, there is no matching store,
// so just return a zero of the appropriate type -
// except if it is vectorized - then we have no zero constant.
Node *mem = in(MemNode::Memory);
// If we are loading from a freshly-allocated object/array, produce a zero.
// Things to check:
// 1. Load is beyond the header: headers are not guaranteed to be zero
// 2. Load is not vectorized: vectors have no zero constant
// 3. Load has no matching store, i.e. the input is the initial memory state
const TypeOopPtr* tinst = tp->isa_oopptr();
bool is_not_header = (tinst != nullptr) && tinst->is_known_instance_field();
bool is_not_vect = (_type->isa_vect() == nullptr);
if (is_not_header && is_not_vect) {
Node* mem = in(MemNode::Memory);
if (mem->is_Parm() && mem->in(0)->is_Start()) {
assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
return Type::get_zero_type(_type->basic_type());
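The three checks above as a single predicate, for readability (a sketch with hypothetical flags, not the C2 node API):

struct LoadZeroChecks {
  bool beyond_header;    // 1. not reading the mark word or klass pointer
  bool is_vector;        // 2. vector loads have no zero constant to return
  bool initial_memory;   // 3. memory input is the Start Parm, i.e. no store yet
};

static bool folds_to_zero(const LoadZeroChecks& c) {
  return c.beyond_header && !c.is_vector && c.initial_memory;
}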

View File

@ -1209,9 +1209,12 @@ bool Node::has_special_unique_user() const {
if (this->is_Store()) {
// Condition for back-to-back stores folding.
return n->Opcode() == op && n->in(MemNode::Memory) == this;
} else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
} else if ((this->is_Load() || this->is_DecodeN() || this->is_Phi()) && n->Opcode() == Op_MemBarAcquire) {
// Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
return n->Opcode() == Op_MemBarAcquire;
return true;
} else if (this->is_Load() && n->is_Move()) {
// Condition for MoveX2Y (LoadX mem) => LoadY mem
return true;
} else if (op == Op_AddL) {
// Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
return n->Opcode() == Op_ConvL2I && n->in(1) == this;

View File

@ -1252,7 +1252,6 @@ bool VTransformReductionVectorNode::optimize_move_non_strict_order_reductions_ou
// back to the phi. Check that all non strict order reductions only have a single
// use, except for the last (last_red), which only has phi as a use in the loop,
// and all other uses are outside the loop.
VTransformReductionVectorNode* first_red = this;
VTransformReductionVectorNode* last_red = phi->in_req(2)->isa_ReductionVector();
VTransformReductionVectorNode* current_red = last_red;
while (true) {
@ -1264,7 +1263,11 @@ bool VTransformReductionVectorNode::optimize_move_non_strict_order_reductions_ou
tty->print(" Cannot move out of loop, other reduction node does not match:");
print();
tty->print(" other: ");
current_red->print();
if (current_red != nullptr) {
current_red->print();
} else {
tty->print("nullptr");
}
)
return false; // not compatible
}

View File

@ -1867,6 +1867,32 @@ address jni_GetDoubleField_addr() {
return (address)jni_GetDoubleField;
}
static void log_debug_if_final_static_field(JavaThread* current, const char* func_name, InstanceKlass* ik, int offset) {
if (log_is_enabled(Debug, jni)) {
fieldDescriptor fd;
bool found = ik->find_field_from_offset(offset, true, &fd);
assert(found, "bad field offset");
assert(fd.is_static(), "static/instance mismatch");
if (fd.is_final() && !fd.is_mutable_static_final()) {
ResourceMark rm(current);
log_debug(jni)("%s mutated final static field %s.%s", func_name, ik->external_name(), fd.name()->as_C_string());
}
}
}
static void log_debug_if_final_instance_field(JavaThread* current, const char* func_name, InstanceKlass* ik, int offset) {
if (log_is_enabled(Debug, jni)) {
fieldDescriptor fd;
bool found = ik->find_field_from_offset(offset, false, &fd);
assert(found, "bad field offset");
assert(!fd.is_static(), "static/instance mismatch");
if (fd.is_final()) {
ResourceMark rm(current);
log_debug(jni)("%s mutated final instance field %s.%s", func_name, ik->external_name(), fd.name()->as_C_string());
}
}
}
JNI_ENTRY_NO_PRESERVE(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID, jobject value))
HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID, value);
oop o = JNIHandles::resolve_non_null(obj);
@ -1879,6 +1905,7 @@ JNI_ENTRY_NO_PRESERVE(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldI
o = JvmtiExport::jni_SetField_probe(thread, obj, o, k, fieldID, false, JVM_SIGNATURE_CLASS, (jvalue *)&field_value);
}
HeapAccess<ON_UNKNOWN_OOP_REF>::oop_store_at(o, offset, JNIHandles::resolve(value));
log_debug_if_final_instance_field(thread, "SetObjectField", InstanceKlass::cast(k), offset);
HOTSPOT_JNI_SETOBJECTFIELD_RETURN();
JNI_END
@ -1901,6 +1928,7 @@ JNI_ENTRY_NO_PRESERVE(void, jni_Set##Result##Field(JNIEnv *env, jobject obj, jfi
o = JvmtiExport::jni_SetField_probe(thread, obj, o, k, fieldID, false, SigType, (jvalue *)&field_value); \
} \
o->Fieldname##_field_put(offset, value); \
log_debug_if_final_instance_field(thread, "Set<Type>Field", InstanceKlass::cast(k), offset); \
ReturnProbe; \
JNI_END
@ -2072,6 +2100,7 @@ JNI_ENTRY(void, jni_SetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fie
JvmtiExport::jni_SetField_probe(thread, nullptr, nullptr, id->holder(), fieldID, true, JVM_SIGNATURE_CLASS, (jvalue *)&field_value);
}
id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value));
log_debug_if_final_static_field(THREAD, "SetStaticObjectField", id->holder(), id->offset());
HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN();
JNI_END
@ -2093,6 +2122,7 @@ JNI_ENTRY(void, jni_SetStatic##Result##Field(JNIEnv *env, jclass clazz, jfieldID
JvmtiExport::jni_SetField_probe(thread, nullptr, nullptr, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \
} \
id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \
log_debug_if_final_static_field(THREAD, "SetStatic<Type>Field", id->holder(), id->offset()); \
ReturnProbe;\
JNI_END
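A hypothetical JNI snippet that would now trigger one of these debug lines when the VM runs with -Xlog:jni=debug (the class and field names are invented for illustration):

#include <jni.h>

// Writing a final instance field through JNI is legal, but now logged.
void poke_final_field(JNIEnv* env, jobject obj) {
  jclass cls = env->GetObjectClass(obj);
  jfieldID fid = env->GetFieldID(cls, "count", "I");  // assume 'count' is final
  env->SetIntField(obj, fid, 7);
  // Expected output, per log_debug_if_final_instance_field above:
  //   [debug][jni] Set<Type>Field mutated final instance field p.C.count
}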

Some files were not shown because too many files have changed in this diff.