diff --git a/.gitignore b/.gitignore
index 852b692f99b..0743489f8ec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,3 +26,8 @@ NashornProfile.txt
*.rej
*.orig
test/benchmarks/**/target
+/src/hotspot/CMakeLists.txt
+/src/hotspot/compile_commands.json
+/src/hotspot/cmake-build-debug/
+/src/hotspot/.cache/
+/src/hotspot/.idea/
diff --git a/doc/building.html b/doc/building.html
index 99eb3e0c473..19313ebf43a 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -668,7 +668,7 @@ update.
(Note that this version is often presented as "MSVC 14.28", and reported
by cl.exe as 19.28.) Older versions will not be accepted by
configure and will not work. The maximum accepted version
-of Visual Studio is 2022.
+of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed,
configure will by default pick the latest. You can request
a specific version to be used by setting
diff --git a/doc/building.md b/doc/building.md
index 047255d1848..1fbd395a9d1 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -468,7 +468,7 @@ available for this update.
The minimum accepted version is Visual Studio 2019 version 16.8. (Note that
this version is often presented as "MSVC 14.28", and reported by cl.exe as
19.28.) Older versions will not be accepted by `configure` and will not work.
-The maximum accepted version of Visual Studio is 2022.
+The maximum accepted version of Visual Studio is 2026.
If you have multiple versions of Visual Studio installed, `configure` will by
default pick the latest. You can request a specific version to be used by
diff --git a/make/autoconf/toolchain_microsoft.m4 b/make/autoconf/toolchain_microsoft.m4
index 17ad2666b3a..f577cf1a2a1 100644
--- a/make/autoconf/toolchain_microsoft.m4
+++ b/make/autoconf/toolchain_microsoft.m4
@@ -25,7 +25,7 @@
################################################################################
# The order of these defines the priority by which we try to find them.
-VALID_VS_VERSIONS="2022 2019"
+VALID_VS_VERSIONS="2022 2019 2026"
VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
VS_VERSION_INTERNAL_2019=142
@@ -57,6 +57,21 @@ VS_SDK_PLATFORM_NAME_2022=
VS_SUPPORTED_2022=true
VS_TOOLSET_SUPPORTED_2022=true
+VS_DESCRIPTION_2026="Microsoft Visual Studio 2026"
+VS_VERSION_INTERNAL_2026=145
+VS_MSVCR_2026=vcruntime140.dll
+VS_VCRUNTIME_1_2026=vcruntime140_1.dll
+VS_MSVCP_2026=msvcp140.dll
+VS_ENVVAR_2026="VS180COMNTOOLS"
+VS_USE_UCRT_2026="true"
+VS_VS_INSTALLDIR_2026="Microsoft Visual Studio/18"
+VS_EDITIONS_2026="BuildTools Community Professional Enterprise"
+VS_SDK_INSTALLDIR_2026=
+VS_VS_PLATFORM_NAME_2026="v145"
+VS_SDK_PLATFORM_NAME_2026=
+VS_SUPPORTED_2026=true
+VS_TOOLSET_SUPPORTED_2026=true
+
################################################################################
AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index d615e254f5a..60912992134 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -95,6 +95,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
EXTRA_OBJECT_FILES := $(BUILD_LIBJVM_ALL_OBJS), \
DEFAULT_CFLAGS := false, \
CFLAGS := $(JVM_CFLAGS) \
+ -DHOTSPOT_GTEST \
-I$(GTEST_FRAMEWORK_SRC)/googletest/include \
-I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
$(addprefix -I, $(GTEST_TEST_SRC)), \
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index fd574b9e42d..a8b90c92e4d 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -337,6 +337,30 @@ TARGETS += $(BUILD_LIBJVM)
# for the associated class. If the class doesn't provide a more specific
# declaration (either directly or by inheriting from a class that provides
# one) then the global definition will be used, triggering this check.
+#
+
+# The HotSpot wrapper for <new> declares as deprecated all the allocation and
+# deallocation functions that use the global allocator. But that blocking
+# isn't bullet-proof. Some of these functions are implicitly available in
+# every translation unit, without the need to include <new>. So even with that
+# wrapper we still need this link-time check. The implicitly declared
+# functions and their mangled names are - from C++17 6.7.4:
+#
+# void* operator new(size_t) // _Znwm
+# void* operator new(size_t, align_val_t) // _ZnwmSt11align_val_t
+#
+# void operator delete(void*) noexcept // _ZdlPv
+# void operator delete(void*, size_t) noexcept // _ZdlPvm
+# void operator delete(void*, align_val_t) noexcept // _ZdlPvSt11align_val_t
+# void operator delete(void*, size_t, align_val_t) noexcept // _ZdlPvmSt11align_val_t
+#
+# void* operator new[](size_t) // _Znam
+# void* operator new[](size_t, align_val_t) // _ZnamSt11align_val_t
+#
+# void operator delete[](void*) noexcept // _ZdaPv
+# void operator delete[](void*, size_t) noexcept // _ZdaPvm
+# void operator delete[](void*, align_val_t) noexcept // _ZdaPvSt11align_val_t
+# void operator delete[](void*, size_t, align_val_t) noexcept // _ZdaPvmSt11align_val_t
ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
@@ -347,10 +371,18 @@ ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
# so use mangled names when looking for symbols.
# Save the demangling for when something is actually found.
MANGLED_SYMS := \
- _ZdaPv \
- _ZdlPv \
- _Znam \
_Znwm \
+ _ZnwmSt11align_val_t \
+ _ZdlPv \
+ _ZdlPvm \
+ _ZdlPvSt11align_val_t \
+ _ZdlPvmSt11align_val_t \
+ _Znam \
+ _ZnamSt11align_val_t \
+ _ZdaPv \
+ _ZdaPvm \
+ _ZdaPvSt11align_val_t \
+ _ZdaPvmSt11align_val_t \
#
UNDEF_PATTERN := ' U '
diff --git a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
index 17867b99595..3e93826c180 100644
--- a/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
+++ b/make/jdk/src/classes/build/tools/taglet/SealedGraph.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -219,13 +219,13 @@ public final class SealedGraph implements Taglet {
// This implies the module is always the same.
private String relativeLink(TypeElement node) {
var util = SealedGraph.this.docletEnvironment.getElementUtils();
- var rootPackage = util.getPackageOf(rootNode);
var nodePackage = util.getPackageOf(node);
- var backNavigator = rootPackage.getQualifiedName().toString().chars()
+ // Note: SVG files for nested types use the simple names of containing types as parent directories.
+ // We therefore need to convert all dots in the qualified name to "../" below.
+ var backNavigator = rootNode.getQualifiedName().toString().chars()
.filter(c -> c == '.')
.mapToObj(c -> "../")
- .collect(joining()) +
- "../";
+ .collect(joining());
var forwardNavigator = nodePackage.getQualifiedName().toString()
.replace(".", "/");
diff --git a/make/langtools/tools/propertiesparser/parser/MessageType.java b/make/langtools/tools/propertiesparser/parser/MessageType.java
index a4ea0ddc3c0..4b7064e3872 100644
--- a/make/langtools/tools/propertiesparser/parser/MessageType.java
+++ b/make/langtools/tools/propertiesparser/parser/MessageType.java
@@ -84,6 +84,7 @@ public interface MessageType {
FILE_OBJECT("file object", "JavaFileObject", "javax.tools"),
PATH("path", "Path", "java.nio.file"),
NAME("name", "Name", "com.sun.tools.javac.util"),
+ LONG("long", "long", null),
NUMBER("number", "int", null),
OPTION_NAME("option name", "Option", "com.sun.tools.javac.main"),
PROFILE("profile", "Profile", "com.sun.tools.javac.jvm"),
diff --git a/make/test/JtregNativeJdk.gmk b/make/test/JtregNativeJdk.gmk
index a204467a77b..0482011f561 100644
--- a/make/test/JtregNativeJdk.gmk
+++ b/make/test/JtregNativeJdk.gmk
@@ -80,6 +80,7 @@ else
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread
BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread
+ BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libJNIAttachMutator := -pthread
BUILD_JDK_JTREG_EXCLUDE += exerevokeall.c
ifeq ($(call isTargetOs, linux), true)
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exelauncher := -ldl
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
index 5d4f0801ec6..07a2d6fbfa0 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
@@ -879,7 +879,6 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
ShouldNotReachHere();
}
- OrderAccess::fence();
ICache::invalidate_word((address)patch_addr);
}
diff --git a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
index c1eabed8ade..dd70c98797f 100644
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -1375,7 +1375,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldr(r10, Address(rmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ lea(rscratch2, unsatisfied);
- __ ldr(rscratch2, rscratch2);
__ cmp(r10, rscratch2);
__ br(Assembler::NE, L);
__ call_VM(noreg,
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 7fcd096d2ad..c169d673aaf 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -6328,36 +6328,8 @@ instruct loadConD_Ex(regD dst, immD src) %{
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).
-// Special prefetch versions which use the dcbz instruction.
-instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
- match(PrefetchAllocation (AddP mem src));
- predicate(AllocatePrefetchStyle == 3);
- ins_cost(MEMORY_REF_COST);
-
- format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
- size(4);
- ins_encode %{
- __ dcbz($src$$Register, $mem$$base$$Register);
- %}
- ins_pipe(pipe_class_memory);
-%}
-
-instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
- match(PrefetchAllocation mem);
- predicate(AllocatePrefetchStyle == 3);
- ins_cost(MEMORY_REF_COST);
-
- format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
- size(4);
- ins_encode %{
- __ dcbz($mem$$base$$Register);
- %}
- ins_pipe(pipe_class_memory);
-%}
-
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
match(PrefetchAllocation (AddP mem src));
- predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
@@ -6370,7 +6342,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
match(PrefetchAllocation mem);
- predicate(AllocatePrefetchStyle != 3);
ins_cost(MEMORY_REF_COST);
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
index 692335d8c08..f073909bf5d 100644
--- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -1146,9 +1146,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label L;
__ ld(x28, Address(xmethod, Method::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
- __ la(t, unsatisfied);
- __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8
-
+ __ la(t1, unsatisfied);
__ bne(x28, t1, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
index f5c4abeb4ca..f9c6f794ebd 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
@@ -612,7 +612,6 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
*
* cpu affinity
* cgroup cpu quota & cpu period
- * cgroup cpu shares
*
* Algorithm:
*
@@ -623,19 +622,18 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
*
* All results of division are rounded up to the next whole number.
*
- * If quotas have not been specified, return the
- * number of active processors in the system.
+ * If quotas have not been specified, sets the result reference to
+ * the number of active processors in the system.
*
- * If quotas have been specified, the resulting number
- * returned will never exceed the number of active processors.
+ * If quotas have been specified, the number set in the result
+ * reference will never exceed the number of active processors.
*
* return:
- * number of CPUs
+ * true if there were no errors. false otherwise.
*/
-int CgroupSubsystem::active_processor_count() {
- int quota_count = 0;
+bool CgroupSubsystem::active_processor_count(int& value) {
int cpu_count;
- int result;
+ int result = -1;
// We use a cache with a timeout to avoid performing expensive
// computations in the event this function is called frequently.
@@ -643,38 +641,50 @@ int CgroupSubsystem::active_processor_count() {
CachingCgroupController* contrl = cpu_controller();
CachedMetric* cpu_limit = contrl->metrics_cache();
if (!cpu_limit->should_check_metric()) {
- int val = (int)cpu_limit->value();
- log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", val);
- return val;
+ value = (int)cpu_limit->value();
+ log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+ return true;
}
cpu_count = os::Linux::active_processor_count();
- result = CgroupUtil::processor_count(contrl->controller(), cpu_count);
+ if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
+ return false;
+ }
+ assert(result > 0 && result <= cpu_count, "must be");
// Update cached metric to avoid re-reading container settings too often
cpu_limit->set_value(result, OSCONTAINER_CACHE_TIMEOUT);
+ value = result;
- return result;
+ return true;
}
/* memory_limit_in_bytes
*
- * Return the limit of available memory for this process.
+ * Return the limit of available memory for this process in the provided
+ * physical_memory_size_type reference. If there was no limit value set in the
+ * underlying interface files, 'value_unlimited' is returned.
*
* return:
- * memory limit in bytes or
- * -1 for unlimited
- * OSCONTAINER_ERROR for not supported
+ * false if retrieving the value failed
+ * true if retrieving the value was successful and the value was
+ * set in the 'value' reference.
*/
-jlong CgroupSubsystem::memory_limit_in_bytes(julong upper_bound) {
+bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value) {
CachingCgroupController* contrl = memory_controller();
CachedMetric* memory_limit = contrl->metrics_cache();
if (!memory_limit->should_check_metric()) {
- return memory_limit->value();
+ value = memory_limit->value();
+ return true;
+ }
+ physical_memory_size_type mem_limit = 0;
+ if (!contrl->controller()->read_memory_limit_in_bytes(upper_bound, mem_limit)) {
+ return false;
}
- jlong mem_limit = contrl->controller()->read_memory_limit_in_bytes(upper_bound);
// Update cached metric to avoid re-reading container settings too often
memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT);
- return mem_limit;
+ value = mem_limit;
+ return true;
}
bool CgroupController::read_string(const char* filename, char* buf, size_t buf_size) {
@@ -719,36 +729,35 @@ bool CgroupController::read_string(const char* filename, char* buf, size_t buf_s
return true;
}
-bool CgroupController::read_number(const char* filename, julong* result) {
+bool CgroupController::read_number(const char* filename, uint64_t& result) {
char buf[1024];
bool is_ok = read_string(filename, buf, 1024);
if (!is_ok) {
return false;
}
- int matched = sscanf(buf, JULONG_FORMAT, result);
+ int matched = sscanf(buf, UINT64_FORMAT, &result);
if (matched == 1) {
return true;
}
return false;
}
-bool CgroupController::read_number_handle_max(const char* filename, jlong* result) {
+bool CgroupController::read_number_handle_max(const char* filename, uint64_t& result) {
char buf[1024];
bool is_ok = read_string(filename, buf, 1024);
if (!is_ok) {
return false;
}
- jlong val = limit_from_str(buf);
- if (val == OSCONTAINER_ERROR) {
+ uint64_t val = 0;
+ if (!limit_from_str(buf, val)) {
return false;
}
- *result = val;
+ result = val;
return true;
}
-bool CgroupController::read_numerical_key_value(const char* filename, const char* key, julong* result) {
+bool CgroupController::read_numerical_key_value(const char* filename, const char* key, uint64_t& result) {
assert(key != nullptr, "key must be given");
- assert(result != nullptr, "result pointer must not be null");
assert(filename != nullptr, "file to search in must be given");
const char* s_path = subsystem_path();
if (s_path == nullptr) {
@@ -786,7 +795,7 @@ bool CgroupController::read_numerical_key_value(const char* filename, const char
&& after_key != '\n') {
// Skip key, skip space
const char* value_substr = line + key_len + 1;
- int matched = sscanf(value_substr, JULONG_FORMAT, result);
+ int matched = sscanf(value_substr, UINT64_FORMAT, &result);
found_match = matched == 1;
if (found_match) {
break;
@@ -797,12 +806,12 @@ bool CgroupController::read_numerical_key_value(const char* filename, const char
if (found_match) {
return true;
}
- log_debug(os, container)("Type %s (key == %s) not found in file %s", JULONG_FORMAT,
+ log_debug(os, container)("Type %s (key == %s) not found in file %s", UINT64_FORMAT,
key, absolute_path);
return false;
}
-bool CgroupController::read_numerical_tuple_value(const char* filename, bool use_first, jlong* result) {
+bool CgroupController::read_numerical_tuple_value(const char* filename, bool use_first, uint64_t& result) {
char buf[1024];
bool is_ok = read_string(filename, buf, 1024);
if (!is_ok) {
@@ -813,80 +822,90 @@ bool CgroupController::read_numerical_tuple_value(const char* filename, bool use
if (matched != 1) {
return false;
}
- jlong val = limit_from_str(token);
- if (val == OSCONTAINER_ERROR) {
+ uint64_t val = 0;
+ if (!limit_from_str(token, val)) {
return false;
}
- *result = val;
+ result = val;
return true;
}
-jlong CgroupController::limit_from_str(char* limit_str) {
+bool CgroupController::limit_from_str(char* limit_str, uint64_t& value) {
if (limit_str == nullptr) {
- return OSCONTAINER_ERROR;
+ return false;
}
// Unlimited memory in cgroups is the literal string 'max' for
// some controllers, for example the pids controller.
if (strcmp("max", limit_str) == 0) {
- return (jlong)-1;
+ value = value_unlimited;
+ return true;
}
- julong limit;
- if (sscanf(limit_str, JULONG_FORMAT, &limit) != 1) {
- return OSCONTAINER_ERROR;
+ uint64_t limit;
+ if (sscanf(limit_str, UINT64_FORMAT, &limit) != 1) {
+ return false;
}
- return (jlong)limit;
+ value = limit;
+ return true;
}
// CgroupSubsystem implementations
-
-jlong CgroupSubsystem::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
- return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
+bool CgroupSubsystem::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& value) {
+ return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound,
+ upper_swap_bound,
+ value);
}
-jlong CgroupSubsystem::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
- return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound, upper_swap_bound);
+bool CgroupSubsystem::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& value) {
+ return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound,
+ upper_swap_bound,
+ value);
}
-jlong CgroupSubsystem::memory_soft_limit_in_bytes(julong upper_bound) {
- return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound);
+bool CgroupSubsystem::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value) {
+ return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound, value);
}
-jlong CgroupSubsystem::memory_throttle_limit_in_bytes() {
- return memory_controller()->controller()->memory_throttle_limit_in_bytes();
+bool CgroupSubsystem::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
+ return memory_controller()->controller()->memory_throttle_limit_in_bytes(value);
}
-jlong CgroupSubsystem::memory_usage_in_bytes() {
- return memory_controller()->controller()->memory_usage_in_bytes();
+bool CgroupSubsystem::memory_usage_in_bytes(physical_memory_size_type& value) {
+ return memory_controller()->controller()->memory_usage_in_bytes(value);
}
-jlong CgroupSubsystem::memory_max_usage_in_bytes() {
- return memory_controller()->controller()->memory_max_usage_in_bytes();
+bool CgroupSubsystem::memory_max_usage_in_bytes(physical_memory_size_type& value) {
+ return memory_controller()->controller()->memory_max_usage_in_bytes(value);
}
-jlong CgroupSubsystem::rss_usage_in_bytes() {
- return memory_controller()->controller()->rss_usage_in_bytes();
+bool CgroupSubsystem::rss_usage_in_bytes(physical_memory_size_type& value) {
+ return memory_controller()->controller()->rss_usage_in_bytes(value);
}
-jlong CgroupSubsystem::cache_usage_in_bytes() {
- return memory_controller()->controller()->cache_usage_in_bytes();
+bool CgroupSubsystem::cache_usage_in_bytes(physical_memory_size_type& value) {
+ return memory_controller()->controller()->cache_usage_in_bytes(value);
}
-int CgroupSubsystem::cpu_quota() {
- return cpu_controller()->controller()->cpu_quota();
+bool CgroupSubsystem::cpu_quota(int& value) {
+ return cpu_controller()->controller()->cpu_quota(value);
}
-int CgroupSubsystem::cpu_period() {
- return cpu_controller()->controller()->cpu_period();
+bool CgroupSubsystem::cpu_period(int& value) {
+ return cpu_controller()->controller()->cpu_period(value);
}
-int CgroupSubsystem::cpu_shares() {
- return cpu_controller()->controller()->cpu_shares();
+bool CgroupSubsystem::cpu_shares(int& value) {
+ return cpu_controller()->controller()->cpu_shares(value);
}
-jlong CgroupSubsystem::cpu_usage_in_micros() {
- return cpuacct_controller()->cpu_usage_in_micros();
+bool CgroupSubsystem::cpu_usage_in_micros(uint64_t& value) {
+ return cpuacct_controller()->cpu_usage_in_micros(value);
}
-void CgroupSubsystem::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
+void CgroupSubsystem::print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) {
memory_controller()->controller()->print_version_specific_info(st, upper_mem_bound);
}
diff --git a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
index bf0ad03fc56..522b64f8816 100644
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp
@@ -72,23 +72,29 @@
#define CONTAINER_READ_NUMBER_CHECKED(controller, filename, log_string, retval) \
{ \
bool is_ok; \
- is_ok = controller->read_number(filename, &retval); \
+ is_ok = controller->read_number(filename, retval); \
if (!is_ok) { \
- log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
- return OSCONTAINER_ERROR; \
+ log_trace(os, container)(log_string " failed"); \
+ return false; \
} \
- log_trace(os, container)(log_string " is: " JULONG_FORMAT, retval); \
+ log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval); \
+ return true; \
}
#define CONTAINER_READ_NUMBER_CHECKED_MAX(controller, filename, log_string, retval) \
{ \
bool is_ok; \
- is_ok = controller->read_number_handle_max(filename, &retval); \
+ is_ok = controller->read_number_handle_max(filename, retval); \
if (!is_ok) { \
- log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
- return OSCONTAINER_ERROR; \
+ log_trace(os, container)(log_string " failed"); \
+ return false; \
} \
- log_trace(os, container)(log_string " is: " JLONG_FORMAT, retval); \
+ if (retval == value_unlimited) { \
+ log_trace(os, container)(log_string " is: unlimited"); \
+ } else { \
+ log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval); \
+ } \
+ return true; \
}
#define CONTAINER_READ_STRING_CHECKED(controller, filename, log_string, retval, buf_size) \
@@ -96,7 +102,7 @@
bool is_ok; \
is_ok = controller->read_string(filename, retval, buf_size); \
if (!is_ok) { \
- log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
+ log_trace(os, container)(log_string " failed"); \
return nullptr; \
} \
log_trace(os, container)(log_string " is: %s", retval); \
@@ -105,12 +111,13 @@
#define CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(controller, filename, key, log_string, retval) \
{ \
bool is_ok; \
- is_ok = controller->read_numerical_key_value(filename, key, &retval); \
+ is_ok = controller->read_numerical_key_value(filename, key, retval); \
if (!is_ok) { \
- log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR); \
- return OSCONTAINER_ERROR; \
+ log_trace(os, container)(log_string " failed"); \
+ return false; \
} \
- log_trace(os, container)(log_string " is: " JULONG_FORMAT, retval); \
+ log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval); \
+ return true; \
}
class CgroupController: public CHeapObj {
@@ -124,21 +131,22 @@ class CgroupController: public CHeapObj {
const char* mount_point() { return _mount_point; }
virtual bool needs_hierarchy_adjustment() { return false; }
- /* Read a numerical value as unsigned long
+ /* Read a numerical value as uint64_t
*
* returns: false if any error occurred. true otherwise and
- * the parsed value is set in the provided julong pointer.
+ * the parsed value is set in the provided result reference.
*/
- bool read_number(const char* filename, julong* result);
+ bool read_number(const char* filename, uint64_t& result);
/* Convenience method to deal with numbers as well as the string 'max'
* in interface files. Otherwise same as read_number().
*
* returns: false if any error occurred. true otherwise and
- * the parsed value (which might be negative) is being set in
- * the provided jlong pointer.
+ * the parsed value will be set in the provided result reference.
+ * When the value was the string 'max' then 'value_unlimited' is
+ * being set as the value.
*/
- bool read_number_handle_max(const char* filename, jlong* result);
+ bool read_number_handle_max(const char* filename, uint64_t& result);
/* Read a string of at most buf_size - 1 characters from the interface file.
* The provided buffer must be at least buf_size in size so as to account
@@ -156,37 +164,37 @@ class CgroupController: public CHeapObj {
* parsing interface files like cpu.max which contain such tuples.
*
* returns: false if any error occurred. true otherwise and the parsed
- * value of the appropriate tuple entry set in the provided jlong pointer.
+ * value of the appropriate tuple entry set in the provided result reference.
*/
- bool read_numerical_tuple_value(const char* filename, bool use_first, jlong* result);
+ bool read_numerical_tuple_value(const char* filename, bool use_first, uint64_t& result);
/* Read a numerical value from a multi-line interface file. The matched line is
* determined by the provided 'key'. The associated numerical value is being set
- * via the passed in julong pointer. Example interface file 'memory.stat'
+ * via the passed in result reference. Example interface file 'memory.stat'
*
* returns: false if any error occurred. true otherwise and the parsed value is
- * being set in the provided julong pointer.
+ * being set in the provided result reference.
*/
- bool read_numerical_key_value(const char* filename, const char* key, julong* result);
+ bool read_numerical_key_value(const char* filename, const char* key, uint64_t& result);
private:
- static jlong limit_from_str(char* limit_str);
+ static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};
class CachedMetric : public CHeapObj{
private:
- volatile jlong _metric;
+ volatile physical_memory_size_type _metric;
volatile jlong _next_check_counter;
public:
CachedMetric() {
- _metric = -1;
+ _metric = value_unlimited;
_next_check_counter = min_jlong;
}
bool should_check_metric() {
return os::elapsed_counter() > _next_check_counter;
}
- jlong value() { return _metric; }
- void set_value(jlong value, jlong timeout) {
+ physical_memory_size_type value() { return _metric; }
+ void set_value(physical_memory_size_type value, jlong timeout) {
_metric = value;
// Metric is unlikely to change, but we want to remain
// responsive to configuration changes. A very short grace time
@@ -216,9 +224,9 @@ class CachingCgroupController : public CHeapObj {
// Pure virtual class representing version agnostic CPU controllers
class CgroupCpuController: public CHeapObj {
public:
- virtual int cpu_quota() = 0;
- virtual int cpu_period() = 0;
- virtual int cpu_shares() = 0;
+ virtual bool cpu_quota(int& value) = 0;
+ virtual bool cpu_period(int& value) = 0;
+ virtual bool cpu_shares(int& value) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@@ -230,7 +238,7 @@ class CgroupCpuController: public CHeapObj {
// Pure virtual class representing version agnostic CPU accounting controllers
class CgroupCpuacctController: public CHeapObj {
public:
- virtual jlong cpu_usage_in_micros() = 0;
+ virtual bool cpu_usage_in_micros(uint64_t& value) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@@ -242,16 +250,22 @@ class CgroupCpuacctController: public CHeapObj {
// Pure virtual class representing version agnostic memory controllers
class CgroupMemoryController: public CHeapObj {
public:
- virtual jlong read_memory_limit_in_bytes(julong upper_bound) = 0;
- virtual jlong memory_usage_in_bytes() = 0;
- virtual jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
- virtual jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
- virtual jlong memory_soft_limit_in_bytes(julong upper_bound) = 0;
- virtual jlong memory_throttle_limit_in_bytes() = 0;
- virtual jlong memory_max_usage_in_bytes() = 0;
- virtual jlong rss_usage_in_bytes() = 0;
- virtual jlong cache_usage_in_bytes() = 0;
- virtual void print_version_specific_info(outputStream* st, julong upper_mem_bound) = 0;
+ virtual bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value) = 0;
+ virtual bool memory_usage_in_bytes(physical_memory_size_type& value) = 0;
+ virtual bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& value) = 0;
+ virtual bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& value) = 0;
+ virtual bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value) = 0;
+ virtual bool memory_throttle_limit_in_bytes(physical_memory_size_type& value) = 0;
+ virtual bool memory_max_usage_in_bytes(physical_memory_size_type& value) = 0;
+ virtual bool rss_usage_in_bytes(physical_memory_size_type& value) = 0;
+ virtual bool cache_usage_in_bytes(physical_memory_size_type& value) = 0;
+ virtual void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) = 0;
virtual bool needs_hierarchy_adjustment() = 0;
virtual bool is_read_only() = 0;
virtual const char* subsystem_path() = 0;
@@ -262,11 +276,11 @@ class CgroupMemoryController: public CHeapObj {
class CgroupSubsystem: public CHeapObj<mtInternal> {
public:
- jlong memory_limit_in_bytes(julong upper_bound);
- int active_processor_count();
+ bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
+ bool active_processor_count(int& value);
- virtual jlong pids_max() = 0;
- virtual jlong pids_current() = 0;
+ virtual bool pids_max(uint64_t& value) = 0;
+ virtual bool pids_current(uint64_t& value) = 0;
virtual bool is_containerized() = 0;
virtual char * cpu_cpuset_cpus() = 0;
@@ -276,21 +290,26 @@ class CgroupSubsystem: public CHeapObj {
virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
virtual CgroupCpuacctController* cpuacct_controller() = 0;
- int cpu_quota();
- int cpu_period();
- int cpu_shares();
+ bool cpu_quota(int& value);
+ bool cpu_period(int& value);
+ bool cpu_shares(int& value);
- jlong cpu_usage_in_micros();
+ bool cpu_usage_in_micros(uint64_t& value);
- jlong memory_usage_in_bytes();
- jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
- jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
- jlong memory_soft_limit_in_bytes(julong upper_bound);
- jlong memory_throttle_limit_in_bytes();
- jlong memory_max_usage_in_bytes();
- jlong rss_usage_in_bytes();
- jlong cache_usage_in_bytes();
- void print_version_specific_info(outputStream* st, julong upper_mem_bound);
+ bool memory_usage_in_bytes(physical_memory_size_type& value);
+ bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& value);
+ bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& value);
+ bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value);
+ bool memory_throttle_limit_in_bytes(physical_memory_size_type& value);
+ bool memory_max_usage_in_bytes(physical_memory_size_type& value);
+ bool rss_usage_in_bytes(physical_memory_size_type& value);
+ bool cache_usage_in_bytes(physical_memory_size_type& value);
+ void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound);
};
// Utility class for storing info retrieved from /proc/cgroups,
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.cpp b/src/hotspot/os/linux/cgroupUtil_linux.cpp
index de027db812a..7aa07d53148 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.cpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.cpp
@@ -25,13 +25,19 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"
-int CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int host_cpus) {
- assert(host_cpus > 0, "physical host cpus must be positive");
- int limit_count = host_cpus;
- int quota = cpu_ctrl->cpu_quota();
- int period = cpu_ctrl->cpu_period();
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+ assert(upper_bound > 0, "upper bound of cpus must be positive");
+ int limit_count = upper_bound;
+ int quota = -1;
+ int period = -1;
+ if (!cpu_ctrl->cpu_quota(quota)) {
+ return false;
+ }
+ if (!cpu_ctrl->cpu_period(period)) {
+ return false;
+ }
int quota_count = 0;
- int result = 0;
+ int result = upper_bound;
if (quota > -1 && period > 0) {
quota_count = ceilf((float)quota / (float)period);
@@ -43,16 +49,50 @@ int CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int host_cpus) {
limit_count = quota_count;
}
- result = MIN2(host_cpus, limit_count);
+ result = MIN2(upper_bound, limit_count);
log_trace(os, container)("OSContainer::active_processor_count: %d", result);
- return result;
+ value = result;
+ return true;
+}
+
+// Get an updated memory limit. The return value is strictly less than or equal to the
+// passed in 'lowest' value.
+physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryController* mem,
+ physical_memory_size_type lowest,
+ physical_memory_size_type upper_bound) {
+ assert(lowest <= upper_bound, "invariant");
+ physical_memory_size_type current_limit = value_unlimited;
+ if (mem->read_memory_limit_in_bytes(upper_bound, current_limit) && current_limit != value_unlimited) {
+ assert(current_limit <= upper_bound, "invariant");
+ if (lowest > current_limit) {
+ return current_limit;
+ }
+ }
+ return lowest;
+}
+
+// Get an updated cpu limit. The return value is strictly less than or equal to the
+// passed in 'lowest' value.
+int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+ int lowest,
+ int upper_bound) {
+ assert(lowest > 0 && lowest <= upper_bound, "invariant");
+ int cpu_limit_val = -1;
+ if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
+ assert(cpu_limit_val <= upper_bound, "invariant");
+ if (lowest > cpu_limit_val) {
+ return cpu_limit_val;
+ }
+ }
+ return lowest;
}
void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
assert(mem->cgroup_path() != nullptr, "invariant");
if (strstr(mem->cgroup_path(), "../") != nullptr) {
- log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved to '%s', detected limits won't be accurate",
- mem->mount_point(), mem->cgroup_path());
+ log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved "
+ "to '%s'. Detected limits won't be accurate",
+ mem->mount_point(), mem->cgroup_path());
mem->set_subsystem_path("/");
return;
}
@@ -65,17 +105,18 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
char* cg_path = os::strdup(orig);
char* last_slash;
assert(cg_path[0] == '/', "cgroup path must start with '/'");
- julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
+ physical_memory_size_type phys_mem = os::Linux::physical_memory();
char* limit_cg_path = nullptr;
- jlong limit = mem->read_memory_limit_in_bytes(phys_mem);
- jlong lowest_limit = limit < 0 ? phys_mem : limit;
- julong orig_limit = ((julong)lowest_limit) != phys_mem ? lowest_limit : phys_mem;
+ physical_memory_size_type limit = value_unlimited;
+ physical_memory_size_type lowest_limit = phys_mem;
+ lowest_limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
+ physical_memory_size_type orig_limit = lowest_limit != phys_mem ? lowest_limit : phys_mem;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
*last_slash = '\0'; // strip path
// update to shortened path and try again
mem->set_subsystem_path(cg_path);
- limit = mem->read_memory_limit_in_bytes(phys_mem);
- if (limit >= 0 && limit < lowest_limit) {
+ limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
+ if (limit < lowest_limit) {
lowest_limit = limit;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup(cg_path);
@@ -83,24 +124,24 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
}
// need to check limit at mount point
mem->set_subsystem_path("/");
- limit = mem->read_memory_limit_in_bytes(phys_mem);
- if (limit >= 0 && limit < lowest_limit) {
+ limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
+ if (limit < lowest_limit) {
lowest_limit = limit;
os::free(limit_cg_path); // handles nullptr
limit_cg_path = os::strdup("/");
}
- assert(lowest_limit >= 0, "limit must be positive");
- if ((julong)lowest_limit != orig_limit) {
+ assert(lowest_limit <= phys_mem, "limit must not exceed host memory");
+ if (lowest_limit != orig_limit) {
// we've found a lower limit anywhere in the hierarchy,
// set the path to the limit path
assert(limit_cg_path != nullptr, "limit path must be set");
mem->set_subsystem_path(limit_cg_path);
log_trace(os, container)("Adjusted controller path for memory to: %s. "
- "Lowest limit was: " JLONG_FORMAT,
+ "Lowest limit was: " PHYS_MEM_TYPE_FORMAT,
mem->subsystem_path(),
lowest_limit);
} else {
- log_trace(os, container)("Lowest limit was: " JLONG_FORMAT, lowest_limit);
+ log_trace(os, container)("Lowest limit was: " PHYS_MEM_TYPE_FORMAT, lowest_limit);
log_trace(os, container)("No lower limit found for memory in hierarchy %s, "
"adjusting to original path %s",
mem->mount_point(), orig);
@@ -114,8 +155,9 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
assert(cpu->cgroup_path() != nullptr, "invariant");
if (strstr(cpu->cgroup_path(), "../") != nullptr) {
- log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved to '%s', detected limits won't be accurate",
- cpu->mount_point(), cpu->cgroup_path());
+ log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved "
+ "to '%s'. Detected limits won't be accurate",
+ cpu->mount_point(), cpu->cgroup_path());
cpu->set_subsystem_path("/");
return;
}
@@ -129,15 +171,15 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
char* last_slash;
assert(cg_path[0] == '/', "cgroup path must start with '/'");
int host_cpus = os::Linux::active_processor_count();
- int cpus = CgroupUtil::processor_count(cpu, host_cpus);
- int lowest_limit = cpus < host_cpus ? cpus: host_cpus;
+ int lowest_limit = host_cpus;
+ int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
char* limit_cg_path = nullptr;
while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
*last_slash = '\0'; // strip path
// update to shortened path and try again
cpu->set_subsystem_path(cg_path);
- cpus = CgroupUtil::processor_count(cpu, host_cpus);
+ cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
if (cpus != host_cpus && cpus < lowest_limit) {
lowest_limit = cpus;
os::free(limit_cg_path); // handles nullptr
@@ -146,7 +188,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
}
// need to check limit at mount point
cpu->set_subsystem_path("/");
- cpus = CgroupUtil::processor_count(cpu, host_cpus);
+ cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
if (cpus != host_cpus && cpus < lowest_limit) {
lowest_limit = cpus;
os::free(limit_cg_path); // handles nullptr
@@ -160,8 +202,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
cpu->set_subsystem_path(limit_cg_path);
log_trace(os, container)("Adjusted controller path for cpu to: %s. "
"Lowest limit was: %d",
- cpu->subsystem_path(),
- lowest_limit);
+ cpu->subsystem_path(), lowest_limit);
} else {
log_trace(os, container)("Lowest limit was: %d", lowest_limit);
log_trace(os, container)("No lower limit found for cpu in hierarchy %s, "
diff --git a/src/hotspot/os/linux/cgroupUtil_linux.hpp b/src/hotspot/os/linux/cgroupUtil_linux.hpp
index aa63a7457cc..d72bbd1cf1e 100644
--- a/src/hotspot/os/linux/cgroupUtil_linux.hpp
+++ b/src/hotspot/os/linux/cgroupUtil_linux.hpp
@@ -31,13 +31,20 @@
class CgroupUtil: AllStatic {
public:
- static int processor_count(CgroupCpuController* cpu, int host_cpus);
+ static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
// Given a memory controller, adjust its path to a point in the hierarchy
// that represents the closest memory limit.
static void adjust_controller(CgroupMemoryController* m);
// Given a cpu controller, adjust its path to a point in the hierarchy
// that represents the closest cpu limit.
static void adjust_controller(CgroupCpuController* c);
+ private:
+ static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
+ physical_memory_size_type lowest,
+ physical_memory_size_type upper_bound);
+ static int get_updated_cpu_limit(CgroupCpuController* c,
+ int lowest,
+ int upper_bound);
};
#endif // CGROUP_UTIL_LINUX_HPP
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
index 87870e647eb..ddcb0db2161 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp
@@ -124,10 +124,13 @@ void CgroupV1Controller::set_subsystem_path(const char* cgroup_path) {
}
}
-jlong CgroupV1MemoryController::uses_mem_hierarchy() {
- julong use_hierarchy;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.use_hierarchy", "Use Hierarchy", use_hierarchy);
- return (jlong)use_hierarchy;
+bool CgroupV1MemoryController::read_use_hierarchy_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.use_hierarchy", "Use Hierarchy", result);
+}
+
+bool CgroupV1MemoryController::uses_mem_hierarchy() {
+ physical_memory_size_type use_hierarchy = 0;
+ return read_use_hierarchy_val(use_hierarchy) && use_hierarchy > 0;
}
/*
@@ -141,125 +144,177 @@ bool CgroupV1Controller::needs_hierarchy_adjustment() {
return strcmp(_root, _cgroup_path) != 0;
}
-static inline
-void verbose_log(julong read_mem_limit, julong upper_mem_bound) {
- if (log_is_enabled(Debug, os, container)) {
- jlong mem_limit = (jlong)read_mem_limit; // account for negative values
- if (mem_limit < 0 || read_mem_limit >= upper_mem_bound) {
- const char *reason;
- if (mem_limit == OSCONTAINER_ERROR) {
- reason = "failed";
- } else if (mem_limit == -1) {
- reason = "unlimited";
- } else {
- assert(read_mem_limit >= upper_mem_bound, "Expected read value exceeding upper memory bound");
- // Exceeding physical memory is treated as unlimited. This implementation
- // caps it at host_mem since Cg v1 has no value to represent 'max'.
- reason = "ignored";
- }
- log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
- reason, mem_limit, upper_mem_bound);
+bool CgroupV1MemoryController::read_memory_limit_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", result);
+}
+
+bool CgroupV1MemoryController::read_hierarchical_memory_limit_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
+ "hierarchical_memory_limit", "Hierarchical Memory Limit",
+ result);
+}
+
+bool CgroupV1MemoryController::read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type memlimit = 0;
+ if (!read_memory_limit_val(memlimit)) {
+ log_trace(os, container)("container memory limit failed, upper bound is " PHYS_MEM_TYPE_FORMAT, upper_bound);
+ return false;
+ }
+ if (memlimit >= upper_bound) {
+ physical_memory_size_type hierlimit = 0;
+ if (uses_mem_hierarchy() && read_hierarchical_memory_limit_val(hierlimit) &&
+ hierlimit < upper_bound) {
+ log_trace(os, container)("Memory Limit is: " PHYS_MEM_TYPE_FORMAT, hierlimit);
+ result = hierlimit;
+ } else {
+ // Exceeding physical memory is treated as unlimited. This implementation
+ // caps it at host_mem since Cg v1 has no value to represent 'max'.
+ log_trace(os, container)("container memory limit ignored: " PHYS_MEM_TYPE_FORMAT
+ ", upper bound is " PHYS_MEM_TYPE_FORMAT, memlimit, upper_bound);
+ result = value_unlimited;
}
+ } else {
+ result = memlimit;
}
+ return true;
}
-jlong CgroupV1MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
- julong memlimit;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", memlimit);
- if (memlimit >= upper_bound && uses_mem_hierarchy()) {
- CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
- "hierarchical_memory_limit", "Hierarchical Memory Limit",
- memlimit);
- }
- verbose_log(memlimit, upper_bound);
- return (jlong)((memlimit < upper_bound) ? memlimit : -1);
+bool CgroupV1MemoryController::read_mem_swap(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", result);
}
-/* read_mem_swap
+bool CgroupV1MemoryController::read_hierarchical_mem_swap_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
+ "hierarchical_memsw_limit", "Hierarchical Memory and Swap Limit",
+ result);
+}
+
+/* memory_and_swap_limit_in_bytes
*
- * Determine the memory and swap limit metric. Returns a positive limit value strictly
- * lower than the physical memory and swap limit iff there is a limit. Otherwise a
- * negative value is returned indicating the determined status.
+ * Determine the memory and swap limit metric. Sets the 'result' reference to a positive limit value or
+ * 'value_unlimited' (for unlimited).
*
* returns:
- * * A number > 0 if the limit is available and lower than a physical upper bound.
- * * OSCONTAINER_ERROR if the limit cannot be retrieved (i.e. not supported) or
- * * -1 if there isn't any limit in place (note: includes values which exceed a physical
- * upper bound)
+ * * false if an error occurred. 'result' reference remains unchanged.
+ * * true if the limit value has been set in the 'result' reference
*/
-jlong CgroupV1MemoryController::read_mem_swap(julong upper_memsw_bound) {
- julong memswlimit;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", memswlimit);
- if (memswlimit >= upper_memsw_bound && uses_mem_hierarchy()) {
- CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
- "hierarchical_memsw_limit", "Hierarchical Memory and Swap Limit",
- memswlimit);
+bool CgroupV1MemoryController::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type total_mem_swap = upper_mem_bound + upper_swap_bound;
+ physical_memory_size_type memory_swap = 0;
+ bool mem_swap_read_failed = false;
+ if (!read_mem_swap(memory_swap)) {
+ mem_swap_read_failed = true;
+ }
+ if (memory_swap >= total_mem_swap) {
+ physical_memory_size_type hiermswlimit = 0;
+ if (uses_mem_hierarchy() && read_hierarchical_mem_swap_val(hiermswlimit) &&
+ hiermswlimit < total_mem_swap) {
+ log_trace(os, container)("Memory and Swap Limit is: " PHYS_MEM_TYPE_FORMAT, hiermswlimit);
+ memory_swap = hiermswlimit;
+ } else {
+ memory_swap = value_unlimited;
+ }
+ }
+ if (memory_swap == value_unlimited) {
+ log_trace(os, container)("Memory and Swap Limit is: Unlimited");
+ result = value_unlimited;
+ return true;
}
- verbose_log(memswlimit, upper_memsw_bound);
- return (jlong)((memswlimit < upper_memsw_bound) ? memswlimit : -1);
-}
-jlong CgroupV1MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
- jlong memory_swap = read_mem_swap(upper_mem_bound + upper_swap_bound);
- if (memory_swap == -1) {
- return memory_swap;
- }
// If there is a swap limit, but swappiness == 0, reset the limit
// to the memory limit. Do the same for cases where swap isn't
// supported.
- jlong swappiness = read_mem_swappiness();
- if (swappiness == 0 || memory_swap == OSCONTAINER_ERROR) {
- jlong memlimit = read_memory_limit_in_bytes(upper_mem_bound);
- if (memory_swap == OSCONTAINER_ERROR) {
- log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swap is not supported", memlimit);
- } else {
- log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swappiness is 0", memlimit);
- }
- return memlimit;
+ physical_memory_size_type swappiness = 0;
+ if (!read_mem_swappiness(swappiness)) {
+ // assume no swap
+ mem_swap_read_failed = true;
}
- return memory_swap;
+ if (swappiness == 0 || mem_swap_read_failed) {
+ physical_memory_size_type memlimit = value_unlimited;
+ if (!read_memory_limit_in_bytes(upper_mem_bound, memlimit)) {
+ return false;
+ }
+ if (memlimit == value_unlimited) {
+ result = value_unlimited; // No memory limit, thus no swap limit
+ return true;
+ }
+ if (mem_swap_read_failed) {
+ log_trace(os, container)("Memory and Swap Limit has been reset to " PHYS_MEM_TYPE_FORMAT
+ " because swap is not supported", memlimit);
+ } else {
+ log_trace(os, container)("Memory and Swap Limit has been reset to " PHYS_MEM_TYPE_FORMAT
+ " because swappiness is 0", memlimit);
+ }
+ result = memlimit;
+ return true;
+ }
+ result = memory_swap;
+ return true;
}
static inline
-jlong memory_swap_usage_impl(CgroupController* ctrl) {
- julong memory_swap_usage;
- CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.memsw.usage_in_bytes", "mem swap usage", memory_swap_usage);
- return (jlong)memory_swap_usage;
+bool memory_swap_usage_impl(CgroupController* ctrl, physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.memsw.usage_in_bytes", "mem swap usage", result);
}
-jlong CgroupV1MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
- jlong memory_sw_limit = memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
- jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
- if (memory_sw_limit > 0 && memory_limit > 0) {
- jlong delta_swap = memory_sw_limit - memory_limit;
- if (delta_swap > 0) {
- return memory_swap_usage_impl(reader());
+bool CgroupV1MemoryController::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type memory_sw_limit = value_unlimited;
+ if (!memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound, memory_sw_limit)) {
+ return false;
+ }
+ physical_memory_size_type mem_limit_val = value_unlimited;
+ physical_memory_size_type memory_limit = value_unlimited;
+ if (read_memory_limit_in_bytes(upper_mem_bound, mem_limit_val)) {
+ if (mem_limit_val != value_unlimited) {
+ memory_limit = mem_limit_val;
}
}
- return memory_usage_in_bytes();
-}
-
-jlong CgroupV1MemoryController::read_mem_swappiness() {
- julong swappiness;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.swappiness", "Swappiness", swappiness);
- return (jlong)swappiness;
-}
-
-jlong CgroupV1MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
- julong memsoftlimit;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", memsoftlimit);
- if (memsoftlimit >= upper_bound) {
- log_trace(os, container)("Memory Soft Limit is: Unlimited");
- return (jlong)-1;
- } else {
- return (jlong)memsoftlimit;
+ if (memory_sw_limit != value_unlimited && memory_limit != value_unlimited) {
+ if (memory_limit < memory_sw_limit) {
+ // swap allowed and > 0
+ physical_memory_size_type swap_usage = 0;
+ if (!memory_swap_usage_impl(reader(), swap_usage)) {
+ return false;
+ }
+ result = swap_usage;
+ return true;
+ }
}
+ return memory_usage_in_bytes(result);
}
-jlong CgroupV1MemoryController::memory_throttle_limit_in_bytes() {
+bool CgroupV1MemoryController::read_mem_swappiness(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.swappiness", "Swappiness", result);
+}
+
+bool CgroupV1MemoryController::memory_soft_limit_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", result);
+}
+
+bool CgroupV1MemoryController::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type mem_soft_limit = 0;
+ if (!memory_soft_limit_val(mem_soft_limit)) {
+ return false;
+ }
+ if (mem_soft_limit >= upper_bound) {
+ log_trace(os, container)("Memory Soft Limit is: Unlimited");
+ result = value_unlimited;
+ } else {
+ result = mem_soft_limit;
+ }
+ return true;
+}
+
+bool CgroupV1MemoryController::memory_throttle_limit_in_bytes(physical_memory_size_type& result) {
// Log this string at trace level so as to make tests happy.
log_trace(os, container)("Memory Throttle Limit is not supported.");
- return OSCONTAINER_ERROR; // not supported
+ return false;
}
// Constructor
@@ -288,80 +343,129 @@ bool CgroupV1Subsystem::is_containerized() {
_cpuset->is_read_only();
}
-/* memory_usage_in_bytes
+bool CgroupV1MemoryController::memory_usage_in_bytes(physical_memory_size_type& result) {
+ physical_memory_size_type memory_usage = 0;
+ if (!memory_usage_val(memory_usage)) {
+ return false;
+ }
+ result = memory_usage;
+ return true;
+}
+
+/* memory_usage_val
*
- * Return the amount of used memory for this process.
+ * Read the amount of used memory for this process into the passed in reference 'result'
*
* return:
- * memory usage in bytes or
- * -1 for unlimited
- * OSCONTAINER_ERROR for not supported
+ * true when reading of the file was successful and 'result' was set appropriately
+ * false when reading of the file failed
*/
-jlong CgroupV1MemoryController::memory_usage_in_bytes() {
- julong memusage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.usage_in_bytes", "Memory Usage", memusage);
- return (jlong)memusage;
+bool CgroupV1MemoryController::memory_usage_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.usage_in_bytes", "Memory Usage", result);
+}
+
+bool CgroupV1MemoryController::memory_max_usage_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.max_usage_in_bytes", "Maximum Memory Usage", result);
}
/* memory_max_usage_in_bytes
*
- * Return the maximum amount of used memory for this process.
+ * Return the maximum amount of used memory for this process in the
+ * result reference.
*
* return:
- * max memory usage in bytes or
- * OSCONTAINER_ERROR for not supported
+ * true if the result reference has been set
+ * false otherwise (e.g. on error)
*/
-jlong CgroupV1MemoryController::memory_max_usage_in_bytes() {
- julong memmaxusage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.max_usage_in_bytes", "Maximum Memory Usage", memmaxusage);
- return (jlong)memmaxusage;
-}
-
-jlong CgroupV1MemoryController::rss_usage_in_bytes() {
- julong rss;
- bool is_ok = reader()->read_numerical_key_value("/memory.stat", "rss", &rss);
- if (!is_ok) {
- return OSCONTAINER_ERROR;
+bool CgroupV1MemoryController::memory_max_usage_in_bytes(physical_memory_size_type& result) {
+ physical_memory_size_type memory_max_usage = 0;
+ if (!memory_max_usage_val(memory_max_usage)) {
+ return false;
}
- log_trace(os, container)("RSS usage is: " JULONG_FORMAT, rss);
- return (jlong)rss;
+ result = memory_max_usage;
+ return true;
}
-jlong CgroupV1MemoryController::cache_usage_in_bytes() {
- julong cache;
- bool is_ok = reader()->read_numerical_key_value("/memory.stat", "cache", &cache);
- if (!is_ok) {
- return OSCONTAINER_ERROR;
+bool CgroupV1MemoryController::rss_usage_in_bytes(physical_memory_size_type& result) {
+ physical_memory_size_type rss = 0;
+
+ if (!reader()->read_numerical_key_value("/memory.stat", "rss", rss)) {
+ return false;
}
- log_trace(os, container)("Cache usage is: " JULONG_FORMAT, cache);
- return cache;
+ log_trace(os, container)("RSS usage is: " PHYS_MEM_TYPE_FORMAT, rss);
+ result = rss;
+ return true;
}
-jlong CgroupV1MemoryController::kernel_memory_usage_in_bytes() {
- julong kmem_usage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.usage_in_bytes", "Kernel Memory Usage", kmem_usage);
- return (jlong)kmem_usage;
+bool CgroupV1MemoryController::cache_usage_in_bytes(physical_memory_size_type& result) {
+ physical_memory_size_type cache = 0;
+ if (!reader()->read_numerical_key_value("/memory.stat", "cache", cache)) {
+ return false;
+ }
+ log_trace(os, container)("Cache usage is: " PHYS_MEM_TYPE_FORMAT, cache);
+ result = cache;
+ return true;
}
-jlong CgroupV1MemoryController::kernel_memory_limit_in_bytes(julong upper_bound) {
- julong kmem_limit;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", kmem_limit);
+bool CgroupV1MemoryController::kernel_memory_usage_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.usage_in_bytes", "Kernel Memory Usage", result);
+}
+
+bool CgroupV1MemoryController::kernel_memory_usage_in_bytes(physical_memory_size_type& result) {
+ physical_memory_size_type kmem_usage = 0;
+ if (!kernel_memory_usage_val(kmem_usage)) {
+ return false;
+ }
+ result = kmem_usage;
+ return true;
+}
+
+bool CgroupV1MemoryController::kernel_memory_limit_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", result);
+}
+
+bool CgroupV1MemoryController::kernel_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type kmem_limit = 0;
+ if (!kernel_memory_limit_val(kmem_limit)) {
+ return false;
+ }
if (kmem_limit >= upper_bound) {
- return (jlong)-1;
+ kmem_limit = value_unlimited;
}
- return (jlong)kmem_limit;
+ result = kmem_limit;
+ return true;
}
-jlong CgroupV1MemoryController::kernel_memory_max_usage_in_bytes() {
- julong kmem_max_usage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.max_usage_in_bytes", "Maximum Kernel Memory Usage", kmem_max_usage);
- return (jlong)kmem_max_usage;
+bool CgroupV1MemoryController::kernel_memory_max_usage_val(physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.max_usage_in_bytes", "Maximum Kernel Memory Usage", result);
}
-void CgroupV1MemoryController::print_version_specific_info(outputStream* st, julong mem_bound) {
- jlong kmem_usage = kernel_memory_usage_in_bytes();
- jlong kmem_limit = kernel_memory_limit_in_bytes(mem_bound);
- jlong kmem_max_usage = kernel_memory_max_usage_in_bytes();
+bool CgroupV1MemoryController::kernel_memory_max_usage_in_bytes(physical_memory_size_type& result) {
+ physical_memory_size_type kmem_max_usage = 0;
+ if (!kernel_memory_max_usage_val(kmem_max_usage)) {
+ return false;
+ }
+ result = kmem_max_usage;
+ return true;
+}
+
+void CgroupV1MemoryController::print_version_specific_info(outputStream* st, physical_memory_size_type mem_bound) {
+ MetricResult kmem_usage;
+ physical_memory_size_type temp = 0;
+ if (kernel_memory_usage_in_bytes(temp)) {
+ kmem_usage.set_value(temp);
+ }
+ MetricResult kmem_limit;
+ temp = value_unlimited;
+ if (kernel_memory_limit_in_bytes(mem_bound, temp)) {
+ kmem_limit.set_value(temp);
+ }
+ MetricResult kmem_max_usage;
+ temp = 0;
+ if (kernel_memory_max_usage_in_bytes(temp)) {
+ kmem_max_usage.set_value(temp);
+ }
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit_in_bytes");
OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage_in_bytes");
@@ -383,74 +487,114 @@ char* CgroupV1Subsystem::cpu_cpuset_memory_nodes() {
/* cpu_quota
*
* Return the number of microseconds per period
- * process is guaranteed to run.
+ * a process is guaranteed to run in the provided
+ * result reference.
*
* return:
- * quota time in microseconds
- * -1 for no quota
- * OSCONTAINER_ERROR for not supported
+ * true if the value was set in the result reference
+ * false on failure to read the number from the file
+ * and the result reference has not been touched.
*/
-int CgroupV1CpuController::cpu_quota() {
- julong quota;
- bool is_ok = reader()->read_number("/cpu.cfs_quota_us", &quota);
- if (!is_ok) {
- log_trace(os, container)("CPU Quota failed: %d", OSCONTAINER_ERROR);
- return OSCONTAINER_ERROR;
+bool CgroupV1CpuController::cpu_quota(int& result) {
+ uint64_t quota = 0;
+
+ // intentionally not using the macro so as to not log a
+ // negative value as a large unsigned int
+ if (!reader()->read_number("/cpu.cfs_quota_us", quota)) {
+ log_trace(os, container)("CPU Quota failed");
+ return false;
}
// cast to int since the read value might be negative
// and we want to avoid logging -1 as a large unsigned value.
- int quota_int = (int)quota;
+ int quota_int = static_cast<int>(quota);
log_trace(os, container)("CPU Quota is: %d", quota_int);
- return quota_int;
+ result = quota_int;
+ return true;
}
-int CgroupV1CpuController::cpu_period() {
- julong period;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.cfs_period_us", "CPU Period", period);
- return (int)period;
+bool CgroupV1CpuController::cpu_period_val(uint64_t& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.cfs_period_us", "CPU Period", result);
+}
+
+bool CgroupV1CpuController::cpu_period(int& result) {
+ uint64_t period = value_unlimited;
+ if (!cpu_period_val(period)) {
+ return false;
+ }
+ result = static_cast<int>(period);
+ return true;
+}
+
+bool CgroupV1CpuController::cpu_shares_val(uint64_t& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.shares", "CPU Shares", result);
}
/* cpu_shares
*
* Return the amount of cpu shares available to the process
+ * - Share number (typically a number relative to 1024)
+ * - (2048 typically expresses 2 CPUs worth of processing)
*
* return:
- * Share number (typically a number relative to 1024)
- * (2048 typically expresses 2 CPUs worth of processing)
- * -1 for no share setup
- * OSCONTAINER_ERROR for not supported
+ * false on error
+ * true if the result has been set in the result reference
*/
-int CgroupV1CpuController::cpu_shares() {
- julong shares;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.shares", "CPU Shares", shares);
- int shares_int = (int)shares;
- // Convert 1024 to no shares setup
- if (shares_int == 1024) return -1;
+bool CgroupV1CpuController::cpu_shares(int& result) {
+ uint64_t shares = 0;
+ if (!cpu_shares_val(shares)) {
+ return false;
+ }
+ int shares_int = static_cast<int>(shares);
+ // Convert 1024 to no shares setup (-1)
+ if (shares_int == 1024) {
+ shares_int = -1;
+ }
- return shares_int;
+ result = shares_int;
+ return true;
}
-jlong CgroupV1CpuacctController::cpu_usage_in_micros() {
- julong cpu_usage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpuacct.usage", "CPU Usage", cpu_usage);
+bool CgroupV1CpuacctController::cpu_usage_in_micros_val(uint64_t& result) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpuacct.usage", "CPU Usage", result);
+}
+
+bool CgroupV1CpuacctController::cpu_usage_in_micros(uint64_t& result) {
+ uint64_t cpu_usage = 0;
+ if (!cpu_usage_in_micros_val(cpu_usage)) {
+ return false;
+ }
// Output is in nanoseconds, convert to microseconds.
- return (jlong)cpu_usage / 1000;
+ result = static_cast<uint64_t>(cpu_usage / 1000);
+ return true;
+}
+
+static
+bool pids_max_val(CgroupController* ctrl, uint64_t& result) {
+ CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/pids.max", "Maximum number of tasks", result);
}
/* pids_max
*
* Return the maximum number of tasks available to the process
+ * in the passed result reference (might be value_unlimited).
*
* return:
- * maximum number of tasks
- * -1 for unlimited
- * OSCONTAINER_ERROR for not supported
+ * false on error
+ * true when the result reference has been appropriately set
*/
-jlong CgroupV1Subsystem::pids_max() {
- if (_pids == nullptr) return OSCONTAINER_ERROR;
- jlong pids_max;
- CONTAINER_READ_NUMBER_CHECKED_MAX(_pids, "/pids.max", "Maximum number of tasks", pids_max);
- return pids_max;
+bool CgroupV1Subsystem::pids_max(uint64_t& result) {
+ if (_pids == nullptr) return false;
+ uint64_t pids_val = 0;
+ if (!pids_max_val(_pids, pids_val)) {
+ return false;
+ }
+ result = pids_val;
+ return true;
+}
+
+static
+bool pids_current_val(CgroupController* ctrl, uint64_t& result) {
+ CONTAINER_READ_NUMBER_CHECKED(ctrl, "/pids.current", "Current number of tasks", result);
}
/* pids_current
@@ -458,12 +602,15 @@ jlong CgroupV1Subsystem::pids_max() {
* The number of tasks currently in the cgroup (and its descendants) of the process
*
* return:
- * current number of tasks
- * OSCONTAINER_ERROR for not supported
+ * true if the current number of tasks has been set in the result reference
+ * false if an error occurred
*/
-jlong CgroupV1Subsystem::pids_current() {
- if (_pids == nullptr) return OSCONTAINER_ERROR;
- julong pids_current;
- CONTAINER_READ_NUMBER_CHECKED(_pids, "/pids.current", "Current number of tasks", pids_current);
- return (jlong)pids_current;
+bool CgroupV1Subsystem::pids_current(uint64_t& result) {
+ if (_pids == nullptr) return false;
+ uint64_t pids_current = 0;
+ if (!pids_current_val(_pids, pids_current)) {
+ return false;
+ }
+ result = pids_current;
+ return true;
}
diff --git a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
index ce3184992e8..8aeb64ef18c 100644
--- a/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp
@@ -73,23 +73,44 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
private:
CgroupV1Controller _reader;
CgroupV1Controller* reader() { return &_reader; }
+ bool read_memory_limit_val(physical_memory_size_type& result);
+ bool read_hierarchical_memory_limit_val(physical_memory_size_type& result);
+ bool read_hierarchical_mem_swap_val(physical_memory_size_type& result);
+ bool read_use_hierarchy_val(physical_memory_size_type& result);
+ bool memory_usage_val(physical_memory_size_type& result);
+ bool read_mem_swappiness(physical_memory_size_type& result);
+ bool read_mem_swap(physical_memory_size_type& result);
+ bool memory_soft_limit_val(physical_memory_size_type& result);
+ bool memory_max_usage_val(physical_memory_size_type& result);
+ bool kernel_memory_usage_val(physical_memory_size_type& result);
+ bool kernel_memory_limit_val(physical_memory_size_type& result);
+ bool kernel_memory_max_usage_val(physical_memory_size_type& result);
+ bool uses_mem_hierarchy();
+
public:
void set_subsystem_path(const char *cgroup_path) override {
reader()->set_subsystem_path(cgroup_path);
}
- jlong read_memory_limit_in_bytes(julong upper_bound) override;
- jlong memory_usage_in_bytes() override;
- jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
- jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
- jlong memory_soft_limit_in_bytes(julong upper_bound) override;
- jlong memory_throttle_limit_in_bytes() override;
- jlong memory_max_usage_in_bytes() override;
- jlong rss_usage_in_bytes() override;
- jlong cache_usage_in_bytes() override;
- jlong kernel_memory_usage_in_bytes();
- jlong kernel_memory_limit_in_bytes(julong upper_bound);
- jlong kernel_memory_max_usage_in_bytes();
- void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
+ bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value) override;
+ bool memory_usage_in_bytes(physical_memory_size_type& result) override;
+ bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) override;
+ bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) override;
+ bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) override;
+ bool memory_throttle_limit_in_bytes(physical_memory_size_type& result) override;
+ bool memory_max_usage_in_bytes(physical_memory_size_type& result) override;
+ bool rss_usage_in_bytes(physical_memory_size_type& result) override;
+ bool cache_usage_in_bytes(physical_memory_size_type& result) override;
+ bool kernel_memory_usage_in_bytes(physical_memory_size_type& result);
+ bool kernel_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result);
+ bool kernel_memory_max_usage_in_bytes(physical_memory_size_type& result);
+ void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) override;
bool needs_hierarchy_adjustment() override {
return reader()->needs_hierarchy_adjustment();
}
@@ -99,10 +120,6 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
const char* subsystem_path() override { return reader()->subsystem_path(); }
const char* mount_point() override { return reader()->mount_point(); }
const char* cgroup_path() override { return reader()->cgroup_path(); }
- private:
- jlong uses_mem_hierarchy();
- jlong read_mem_swappiness();
- jlong read_mem_swap(julong upper_memsw_bound);
public:
CgroupV1MemoryController(const CgroupV1Controller& reader)
@@ -116,10 +133,12 @@ class CgroupV1CpuController final : public CgroupCpuController {
private:
CgroupV1Controller _reader;
CgroupV1Controller* reader() { return &_reader; }
+ bool cpu_period_val(uint64_t& result);
+ bool cpu_shares_val(uint64_t& result);
public:
- int cpu_quota() override;
- int cpu_period() override;
- int cpu_shares() override;
+ bool cpu_quota(int& result) override;
+ bool cpu_period(int& result) override;
+ bool cpu_shares(int& result) override;
void set_subsystem_path(const char *cgroup_path) override {
reader()->set_subsystem_path(cgroup_path);
}
@@ -147,8 +166,9 @@ class CgroupV1CpuacctController final : public CgroupCpuacctController {
private:
CgroupV1Controller _reader;
CgroupV1Controller* reader() { return &_reader; }
+ bool cpu_usage_in_micros_val(uint64_t& result);
public:
- jlong cpu_usage_in_micros() override;
+ bool cpu_usage_in_micros(uint64_t& result) override;
void set_subsystem_path(const char *cgroup_path) override {
reader()->set_subsystem_path(cgroup_path);
}
@@ -180,15 +200,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
CgroupV1Controller* pids,
CgroupV1MemoryController* memory);
- jlong kernel_memory_usage_in_bytes();
- jlong kernel_memory_limit_in_bytes();
- jlong kernel_memory_max_usage_in_bytes();
+ bool kernel_memory_usage_in_bytes(physical_memory_size_type& result);
+ bool kernel_memory_limit_in_bytes(physical_memory_size_type& result);
+ bool kernel_memory_max_usage_in_bytes(physical_memory_size_type& result);
- char * cpu_cpuset_cpus();
- char * cpu_cpuset_memory_nodes();
+ char * cpu_cpuset_cpus() override;
+ char * cpu_cpuset_memory_nodes() override;
- jlong pids_max();
- jlong pids_current();
+ bool pids_max(uint64_t& result) override;
+ bool pids_current(uint64_t& result) override;
bool is_containerized();
const char * container_type() {
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
index 38258a1f049..f435e53c02c 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp
@@ -26,6 +26,8 @@
#include "cgroupUtil_linux.hpp"
#include "cgroupV2Subsystem_linux.hpp"
+#include <math.h>
+
// Constructor
CgroupV2Controller::CgroupV2Controller(char* mount_path,
char *cgroup_path,
@@ -42,43 +44,72 @@ CgroupV2Controller::CgroupV2Controller(const CgroupV2Controller& o) :
_mount_point = o._mount_point;
}
+static
+bool read_cpu_shares_value(CgroupV2Controller* ctrl, uint64_t& value) {
+ CONTAINER_READ_NUMBER_CHECKED(ctrl, "/cpu.weight", "Raw value for CPU Shares", value);
+}
+
/* cpu_shares
*
- * Return the amount of cpu shares available to the process
+ * Return the amount of cpu shares available to the process in the
+ * 'result' reference.
*
- * return:
* Share number (typically a number relative to 1024)
* (2048 typically expresses 2 CPUs worth of processing)
- * -1 for no share setup
- * OSCONTAINER_ERROR for not supported
+ *
+ * return:
+ * true if the result reference got updated
+ * false if there was an error
*/
-int CgroupV2CpuController::cpu_shares() {
- julong shares;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.weight", "Raw value for CPU Shares", shares);
- int shares_int = (int)shares;
+bool CgroupV2CpuController::cpu_shares(int& result) {
+ uint64_t shares = 0;
+ bool is_ok = read_cpu_shares_value(reader(), shares);
+ if (!is_ok) {
+ return false;
+ }
+ int shares_int = static_cast<int>(shares);
// Convert default value of 100 to no shares setup
if (shares_int == 100) {
- log_debug(os, container)("CPU Shares is: %d", -1);
- return -1;
+ log_debug(os, container)("CPU Shares is: unlimited");
+ result = -1;
+ return true;
}
+ // cg v2 values must be in range [1-10000]
+ assert(shares_int >= 1 && shares_int <= 10000, "invariant");
// CPU shares (OCI) value needs to get translated into
// a proper Cgroups v2 value. See:
- // https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller
+ // https://github.com/containers/crun/blob/1.24/crun.1.md#cpu-controller
//
// Use the inverse of (x == OCI value, y == cgroupsv2 value):
- // ((262142 * y - 1)/9999) + 2 = x
+ // y = 10^(log2(x)^2/612 + 125/612 * log2(x) - 7.0/34.0)
//
- int x = 262142 * shares_int - 1;
- double frac = x/9999.0;
- x = ((int)frac) + 2;
+ // By re-arranging it to the standard quadratic form:
+ // log2(x)^2 + 125 * log2(x) - (126 + 612 * log_10(y)) = 0
+ //
+ // Therefore, log2(x) = (-125 + sqrt( 125^2 - 4 * (-(126 + 612 * log_10(y)))))/2
+ //
+ // As a result we have the inverse (we can discount subtraction of the
+ // square root value since those values result in very small numbers and the
+ // cpu shares values - OCI - are in range [2,262144]):
+ //
+ // x = 2^((-125 + sqrt(16129 + 2448* log10(y)))/2)
+ //
+ double log_multiplicand = log10(shares_int);
+ double discriminant = 16129 + 2448 * log_multiplicand;
+ double square_root = sqrt(discriminant);
+ double exponent = (-125 + square_root)/2;
+ double scaled_val = pow(2, exponent);
+ int x = (int) scaled_val;
log_trace(os, container)("Scaled CPU shares value is: %d", x);
// Since the scaled value is not precise, return the closest
// multiple of PER_CPU_SHARES for a more conservative mapping
if ( x <= PER_CPU_SHARES ) {
- // will always map to 1 CPU
+ // Don't do the multiples of PER_CPU_SHARES mapping since we
+ // have a value <= PER_CPU_SHARES
log_debug(os, container)("CPU Shares is: %d", x);
- return x;
+ result = x;
+ return true;
}
int f = x/PER_CPU_SHARES;
int lower_multiple = f * PER_CPU_SHARES;
@@ -88,28 +119,33 @@ int CgroupV2CpuController::cpu_shares() {
x = distance_lower <= distance_upper ? lower_multiple : upper_multiple;
log_trace(os, container)("Closest multiple of %d of the CPU Shares value is: %d", PER_CPU_SHARES, x);
log_debug(os, container)("CPU Shares is: %d", x);
- return x;
+ result = x;
+ return true;
}
/* cpu_quota
*
* Return the number of microseconds per period
- * process is guaranteed to run.
+ * process is guaranteed to run in the passed in 'result' reference.
*
* return:
- * quota time in microseconds
- * -1 for no quota
- * OSCONTAINER_ERROR for not supported
+ * true if the result reference has been set
+ * false on error
*/
-int CgroupV2CpuController::cpu_quota() {
- jlong quota_val;
- bool is_ok = reader()->read_numerical_tuple_value("/cpu.max", true /* use_first */, &quota_val);
- if (!is_ok) {
- return OSCONTAINER_ERROR;
+bool CgroupV2CpuController::cpu_quota(int& result) {
+ uint64_t quota_val = 0;
+ if (!reader()->read_numerical_tuple_value("/cpu.max", true /* use_first */, quota_val)) {
+ return false;
+ }
+ int limit = -1;
+ // The read first tuple value might be 'max' which maps
+ // to value_unlimited. Keep that at -1;
+ if (quota_val != value_unlimited) {
+ limit = static_cast<int>(quota_val);
}
- int limit = (int)quota_val;
log_trace(os, container)("CPU Quota is: %d", limit);
- return limit;
+ result = limit;
+ return true;
}
// Constructor
@@ -143,80 +179,67 @@ char* CgroupV2Subsystem::cpu_cpuset_memory_nodes() {
return os::strdup(mems);
}
-int CgroupV2CpuController::cpu_period() {
- jlong period_val;
- bool is_ok = reader()->read_numerical_tuple_value("/cpu.max", false /* use_first */, &period_val);
- if (!is_ok) {
- log_trace(os, container)("CPU Period failed: %d", OSCONTAINER_ERROR);
- return OSCONTAINER_ERROR;
+bool CgroupV2CpuController::cpu_period(int& result) {
+ uint64_t cpu_period = 0;
+ if (!reader()->read_numerical_tuple_value("/cpu.max", false /* use_first */, cpu_period)) {
+ log_trace(os, container)("CPU Period failed");
+ return false;
}
- int period = (int)period_val;
- log_trace(os, container)("CPU Period is: %d", period);
- return period;
+ int period_int = static_cast<int>(cpu_period);
+ log_trace(os, container)("CPU Period is: %d", period_int);
+ result = period_int;
+ return true;
}
-jlong CgroupV2CpuController::cpu_usage_in_micros() {
- julong cpu_usage;
- bool is_ok = reader()->read_numerical_key_value("/cpu.stat", "usage_usec", &cpu_usage);
+bool CgroupV2CpuController::cpu_usage_in_micros(uint64_t& value) {
+ bool is_ok = reader()->read_numerical_key_value("/cpu.stat", "usage_usec", value);
if (!is_ok) {
- log_trace(os, container)("CPU Usage failed: %d", OSCONTAINER_ERROR);
- return OSCONTAINER_ERROR;
+ log_trace(os, container)("CPU Usage failed");
+ return false;
}
- log_trace(os, container)("CPU Usage is: " JULONG_FORMAT, cpu_usage);
- return (jlong)cpu_usage;
+ log_trace(os, container)("CPU Usage is: " UINT64_FORMAT, value);
+ return true;
}
/* memory_usage_in_bytes
*
- * Return the amount of used memory used by this cgroup and descendents
+ * read the amount of used memory used by this cgroup and descendents
+ * into the passed in 'value' reference.
*
* return:
- * memory usage in bytes or
- * -1 for unlimited
- * OSCONTAINER_ERROR for not supported
+ * false on failure, true otherwise.
*/
-jlong CgroupV2MemoryController::memory_usage_in_bytes() {
- julong memusage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.current", "Memory Usage", memusage);
- return (jlong)memusage;
+bool CgroupV2MemoryController::memory_usage_in_bytes(physical_memory_size_type& value) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.current", "Memory Usage", value);
}
-jlong CgroupV2MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
- jlong mem_soft_limit;
- CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.low", "Memory Soft Limit", mem_soft_limit);
- return mem_soft_limit;
+bool CgroupV2MemoryController::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& value) {
+ CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.low", "Memory Soft Limit", value);
}
-jlong CgroupV2MemoryController::memory_throttle_limit_in_bytes() {
- jlong mem_throttle_limit;
- CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.high", "Memory Throttle Limit", mem_throttle_limit);
- return mem_throttle_limit;
+bool CgroupV2MemoryController::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
+ CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.high", "Memory Throttle Limit", value);
}
-jlong CgroupV2MemoryController::memory_max_usage_in_bytes() {
- julong mem_max_usage;
- CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.peak", "Maximum Memory Usage", mem_max_usage);
- return mem_max_usage;
+bool CgroupV2MemoryController::memory_max_usage_in_bytes(physical_memory_size_type& value) {
+ CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.peak", "Maximum Memory Usage", value);
}
-jlong CgroupV2MemoryController::rss_usage_in_bytes() {
- julong rss;
- bool is_ok = reader()->read_numerical_key_value("/memory.stat", "anon", &rss);
- if (!is_ok) {
- return OSCONTAINER_ERROR;
+bool CgroupV2MemoryController::rss_usage_in_bytes(physical_memory_size_type& value) {
+ if (!reader()->read_numerical_key_value("/memory.stat", "anon", value)) {
+ return false;
}
- log_trace(os, container)("RSS usage is: " JULONG_FORMAT, rss);
- return (jlong)rss;
+ log_trace(os, container)("RSS usage is: " PHYS_MEM_TYPE_FORMAT, value);
+ return true;
}
-jlong CgroupV2MemoryController::cache_usage_in_bytes() {
- julong cache;
- bool is_ok = reader()->read_numerical_key_value("/memory.stat", "file", &cache);
- if (!is_ok) {
- return OSCONTAINER_ERROR;
+bool CgroupV2MemoryController::cache_usage_in_bytes(physical_memory_size_type& value) {
+ if (!reader()->read_numerical_key_value("/memory.stat", "file", value)) {
+ return false;
}
- log_trace(os, container)("Cache usage is: " JULONG_FORMAT, cache);
- return (jlong)cache;
+ log_trace(os, container)("Cache usage is: " PHYS_MEM_TYPE_FORMAT, value);
+ return true;
}
// Note that for cgroups v2 the actual limits set for swap and
@@ -224,91 +247,108 @@ jlong CgroupV2MemoryController::cache_usage_in_bytes() {
// respectively. In order to properly report a cgroup v1 like
// compound value we need to sum the two values. Setting a swap limit
// without also setting a memory limit is not allowed.
-jlong CgroupV2MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound,
- julong upper_swap_bound /* unused in cg v2 */) {
- jlong swap_limit;
- bool is_ok = reader()->read_number_handle_max("/memory.swap.max", &swap_limit);
- if (!is_ok) {
+bool CgroupV2MemoryController::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound, /* unused in cg v2 */
+ physical_memory_size_type& result) {
+ physical_memory_size_type swap_limit_val = 0;
+ if (!reader()->read_number_handle_max("/memory.swap.max", swap_limit_val)) {
// Some container tests rely on this trace logging to happen.
- log_trace(os, container)("Swap Limit failed: %d", OSCONTAINER_ERROR);
+ log_trace(os, container)("Swap Limit failed");
// swap disabled at kernel level, treat it as no swap
- return read_memory_limit_in_bytes(upper_mem_bound);
+ physical_memory_size_type mem_limit = value_unlimited;
+ if (!read_memory_limit_in_bytes(upper_mem_bound, mem_limit)) {
+ return false;
+ }
+ result = mem_limit;
+ return true;
}
- log_trace(os, container)("Swap Limit is: " JLONG_FORMAT, swap_limit);
- if (swap_limit >= 0) {
- jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
- assert(memory_limit >= 0, "swap limit without memory limit?");
- return memory_limit + swap_limit;
+ if (swap_limit_val == value_unlimited) {
+ log_trace(os, container)("Memory and Swap Limit is: Unlimited");
+ result = swap_limit_val;
+ return true;
+ }
+ log_trace(os, container)("Swap Limit is: " PHYS_MEM_TYPE_FORMAT, swap_limit_val);
+ physical_memory_size_type memory_limit = 0;
+ if (read_memory_limit_in_bytes(upper_mem_bound, memory_limit)) {
+ assert(memory_limit != value_unlimited, "swap limit without memory limit?");
+ result = memory_limit + swap_limit_val;
+ log_trace(os, container)("Memory and Swap Limit is: " PHYS_MEM_TYPE_FORMAT, result);
+ return true;
+ } else {
+ return false;
}
- log_trace(os, container)("Memory and Swap Limit is: " JLONG_FORMAT, swap_limit);
- return swap_limit;
}
// memory.swap.current : total amount of swap currently used by the cgroup and its descendants
static
-jlong memory_swap_current_value(CgroupV2Controller* ctrl) {
- julong swap_current;
- CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.swap.current", "Swap currently used", swap_current);
- return (jlong)swap_current;
+bool memory_swap_current_value(CgroupV2Controller* ctrl, physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.swap.current", "Swap currently used", result);
}
-jlong CgroupV2MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
- jlong memory_usage = memory_usage_in_bytes();
- if (memory_usage >= 0) {
- jlong swap_current = memory_swap_current_value(reader());
- return memory_usage + (swap_current >= 0 ? swap_current : 0);
+bool CgroupV2MemoryController::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type memory_usage = 0;
+ if (!memory_usage_in_bytes(memory_usage)) {
+ return false;
}
- return memory_usage; // not supported or unlimited case
+ physical_memory_size_type swap_current = 0;
+ if (!memory_swap_current_value(reader(), swap_current)) {
+ result = memory_usage; // treat as no swap usage
+ return true;
+ }
+ result = memory_usage + swap_current;
+ return true;
}
static
-jlong memory_limit_value(CgroupV2Controller* ctrl) {
- jlong memory_limit;
- CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.max", "Memory Limit", memory_limit);
- return memory_limit;
+bool memory_limit_value(CgroupV2Controller* ctrl, physical_memory_size_type& result) {
+ CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.max", "Memory Limit", result);
}
/* read_memory_limit_in_bytes
*
- * Return the limit of available memory for this process.
+ * Calculate the limit of available memory for this process. The result will be
+ * set in the 'result' variable if the function returns true.
*
* return:
- * memory limit in bytes or
- * -1 for unlimited, OSCONTAINER_ERROR for an error
+ * true when the limit could be read correctly.
+ * false in case of any error.
*/
-jlong CgroupV2MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
- jlong limit = memory_limit_value(reader());
+bool CgroupV2MemoryController::read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) {
+ physical_memory_size_type limit = 0; // default unlimited
+ if (!memory_limit_value(reader(), limit)) {
+ log_trace(os, container)("container memory limit failed, using host value " PHYS_MEM_TYPE_FORMAT,
+ upper_bound);
+ return false;
+ }
+ bool is_unlimited = limit == value_unlimited;
+ bool exceeds_physical_mem = false;
+ if (!is_unlimited && limit >= upper_bound) {
+ exceeds_physical_mem = true;
+ }
if (log_is_enabled(Trace, os, container)) {
- if (limit == -1) {
- log_trace(os, container)("Memory Limit is: Unlimited");
- } else {
- log_trace(os, container)("Memory Limit is: " JLONG_FORMAT, limit);
+ if (!is_unlimited) {
+ log_trace(os, container)("Memory Limit is: " PHYS_MEM_TYPE_FORMAT, limit);
}
- }
- if (log_is_enabled(Debug, os, container)) {
- julong read_limit = (julong)limit; // avoid signed/unsigned compare
- if (limit < 0 || read_limit >= upper_bound) {
- const char* reason;
- if (limit == -1) {
- reason = "unlimited";
- } else if (limit == OSCONTAINER_ERROR) {
- reason = "failed";
+ if (is_unlimited || exceeds_physical_mem) {
+ if (is_unlimited) {
+ log_trace(os, container)("Memory Limit is: Unlimited");
+ log_trace(os, container)("container memory limit unlimited, using upper bound value " PHYS_MEM_TYPE_FORMAT, upper_bound);
} else {
- assert(read_limit >= upper_bound, "Expected mem limit to exceed upper memory bound");
- reason = "ignored";
+ log_trace(os, container)("container memory limit ignored: " PHYS_MEM_TYPE_FORMAT ", upper bound is " PHYS_MEM_TYPE_FORMAT,
+ limit, upper_bound);
}
- log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
- reason, limit, upper_bound);
}
}
- return limit;
+ result = limit;
+ return true;
}
static
-jlong memory_swap_limit_value(CgroupV2Controller* ctrl) {
- jlong swap_limit;
- CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.swap.max", "Swap Limit", swap_limit);
- return swap_limit;
+bool memory_swap_limit_value(CgroupV2Controller* ctrl, physical_memory_size_type& value) {
+ CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.swap.max", "Swap Limit", value);
}
void CgroupV2Controller::set_subsystem_path(const char* cgroup_path) {
@@ -327,10 +367,17 @@ bool CgroupV2Controller::needs_hierarchy_adjustment() {
return strcmp(_cgroup_path, "/") != 0;
}
-void CgroupV2MemoryController::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
- jlong swap_current = memory_swap_current_value(reader());
- jlong swap_limit = memory_swap_limit_value(reader());
-
+void CgroupV2MemoryController::print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) {
+ MetricResult swap_current;
+ physical_memory_size_type swap_current_val = 0;
+ if (memory_swap_current_value(reader(), swap_current_val)) {
+ swap_current.set_value(swap_current_val);
+ }
+ MetricResult swap_limit;
+ physical_memory_size_type swap_limit_val = 0;
+ if (memory_swap_limit_value(reader(), swap_limit_val)) {
+ swap_limit.set_value(swap_limit_val);
+ }
OSContainer::print_container_helper(st, swap_current, "memory_swap_current_in_bytes");
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit_in_bytes");
}
@@ -346,29 +393,27 @@ char* CgroupV2Controller::construct_path(char* mount_path, const char* cgroup_pa
/* pids_max
*
- * Return the maximum number of tasks available to the process
+ * Calculate the maximum number of tasks available to the process. Set the
+ * value in the passed in 'value' reference. The value might be 'value_unlimited' when
+ * there is no limit.
*
* return:
- * maximum number of tasks
- * -1 for unlimited
- * OSCONTAINER_ERROR for not supported
+ * true if the value has been set appropriately
+ * false if there was an error
*/
-jlong CgroupV2Subsystem::pids_max() {
- jlong pids_max;
- CONTAINER_READ_NUMBER_CHECKED_MAX(unified(), "/pids.max", "Maximum number of tasks", pids_max);
- return pids_max;
+bool CgroupV2Subsystem::pids_max(uint64_t& value) {
+ CONTAINER_READ_NUMBER_CHECKED_MAX(unified(), "/pids.max", "Maximum number of tasks", value);
}
/* pids_current
*
- * The number of tasks currently in the cgroup (and its descendants) of the process
+ * The number of tasks currently in the cgroup (and its descendants) of the process. Set
+ * in the passed in 'value' reference.
*
* return:
- * current number of tasks
- * OSCONTAINER_ERROR for not supported
+ * true on success
+ * false when there was an error
*/
-jlong CgroupV2Subsystem::pids_current() {
- julong pids_current;
- CONTAINER_READ_NUMBER_CHECKED(unified(), "/pids.current", "Current number of tasks", pids_current);
- return pids_current;
+bool CgroupV2Subsystem::pids_current(uint64_t& value) {
+ CONTAINER_READ_NUMBER_CHECKED(unified(), "/pids.current", "Current number of tasks", value);
}
diff --git a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
index 07db126ce90..39a4fabe9f6 100644
--- a/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp
@@ -59,10 +59,10 @@ class CgroupV2CpuController: public CgroupCpuController {
public:
CgroupV2CpuController(const CgroupV2Controller& reader) : _reader(reader) {
}
- int cpu_quota() override;
- int cpu_period() override;
- int cpu_shares() override;
- jlong cpu_usage_in_micros();
+ bool cpu_quota(int& value) override;
+ bool cpu_period(int& value) override;
+ bool cpu_shares(int& value) override;
+ bool cpu_usage_in_micros(uint64_t& value);
bool is_read_only() override {
return reader()->is_read_only();
}
@@ -87,8 +87,8 @@ class CgroupV2CpuacctController: public CgroupCpuacctController {
CgroupV2CpuacctController(CgroupV2CpuController* reader) : _reader(reader) {
}
// In cgroup v2, cpu usage is a part of the cpu controller.
- jlong cpu_usage_in_micros() override {
- return reader()->cpu_usage_in_micros();
+ bool cpu_usage_in_micros(uint64_t& result) override {
+ return reader()->cpu_usage_in_micros(result);
}
bool is_read_only() override {
return reader()->is_read_only();
@@ -110,20 +110,27 @@ class CgroupV2MemoryController final: public CgroupMemoryController {
private:
CgroupV2Controller _reader;
CgroupV2Controller* reader() { return &_reader; }
+
public:
CgroupV2MemoryController(const CgroupV2Controller& reader) : _reader(reader) {
}
- jlong read_memory_limit_in_bytes(julong upper_bound) override;
- jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
- jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
- jlong memory_soft_limit_in_bytes(julong upper_bound) override;
- jlong memory_throttle_limit_in_bytes() override;
- jlong memory_usage_in_bytes() override;
- jlong memory_max_usage_in_bytes() override;
- jlong rss_usage_in_bytes() override;
- jlong cache_usage_in_bytes() override;
- void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
+ bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) override;
+ bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) override;
+ bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+ physical_memory_size_type upper_swap_bound,
+ physical_memory_size_type& result) override;
+ bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+ physical_memory_size_type& result) override;
+ bool memory_throttle_limit_in_bytes(physical_memory_size_type& result) override;
+ bool memory_usage_in_bytes(physical_memory_size_type& result) override;
+ bool memory_max_usage_in_bytes(physical_memory_size_type& result) override;
+ bool rss_usage_in_bytes(physical_memory_size_type& result) override;
+ bool cache_usage_in_bytes(physical_memory_size_type& result) override;
+ void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) override;
bool is_read_only() override {
return reader()->is_read_only();
}
@@ -160,8 +167,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
char * cpu_cpuset_cpus() override;
char * cpu_cpuset_memory_nodes() override;
- jlong pids_max() override;
- jlong pids_current() override;
+ bool pids_max(uint64_t& result) override;
+ bool pids_current(uint64_t& result) override;
bool is_containerized() override;
diff --git a/src/hotspot/os/linux/osContainer_linux.cpp b/src/hotspot/os/linux/osContainer_linux.cpp
index 561f2d4926c..d86bbf7428a 100644
--- a/src/hotspot/os/linux/osContainer_linux.cpp
+++ b/src/hotspot/os/linux/osContainer_linux.cpp
@@ -84,8 +84,12 @@ void OSContainer::init() {
// We can be in one of two cases:
// 1.) On a physical Linux system without any limit
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
- any_mem_cpu_limit_present = memory_limit_in_bytes() > 0 ||
- os::Linux::active_processor_count() != active_processor_count();
+ physical_memory_size_type mem_limit_val = value_unlimited;
+ (void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
+ int host_cpus = os::Linux::active_processor_count();
+ int cpus = host_cpus;
+ (void)active_processor_count(cpus); // discard error and use default
+ any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
reason = " because either a cpu or a memory limit is present";
} else {
@@ -103,77 +107,138 @@ const char * OSContainer::container_type() {
return cgroup_subsystem->container_type();
}
-bool OSContainer::available_memory_in_container(julong& value) {
- jlong mem_limit = memory_limit_in_bytes();
- jlong mem_usage = memory_usage_in_bytes();
+bool OSContainer::memory_limit_in_bytes(physical_memory_size_type& value) {
+ assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+ physical_memory_size_type phys_mem = os::Linux::physical_memory();
+ return cgroup_subsystem->memory_limit_in_bytes(phys_mem, value);
+}
- if (mem_limit > 0 && mem_usage <= 0) {
- log_debug(os, container)("container memory usage failed: " JLONG_FORMAT, mem_usage);
+bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
+ physical_memory_size_type mem_limit = value_unlimited;
+ physical_memory_size_type mem_usage = 0;
+ if (memory_limit_in_bytes(mem_limit) && memory_usage_in_bytes(mem_usage)) {
+ assert(mem_usage != value_unlimited, "invariant");
+ if (mem_limit != value_unlimited) {
+ value = (mem_limit > mem_usage) ? mem_limit - mem_usage : 0;
+ return true;
+ }
}
+ log_trace(os, container)("calculating available memory in container failed");
+ return false;
+}
- if (mem_limit <= 0 || mem_usage <= 0) {
+bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
+ physical_memory_size_type& value) {
+ physical_memory_size_type mem_limit = 0;
+ physical_memory_size_type mem_swap_limit = 0;
+ if (memory_limit_in_bytes(mem_limit) &&
+ memory_and_swap_limit_in_bytes(mem_swap_limit) &&
+ mem_limit != value_unlimited &&
+ mem_swap_limit != value_unlimited) {
+ if (mem_limit >= mem_swap_limit) {
+ value = 0; // no swap, thus no free swap
+ return true;
+ }
+ physical_memory_size_type swap_limit = mem_swap_limit - mem_limit;
+ physical_memory_size_type mem_swap_usage = 0;
+ physical_memory_size_type mem_usage = 0;
+ if (memory_and_swap_usage_in_bytes(mem_swap_usage) &&
+ memory_usage_in_bytes(mem_usage)) {
+ physical_memory_size_type swap_usage = value_unlimited;
+ if (mem_usage > mem_swap_usage) {
+ swap_usage = 0; // delta usage must not be negative
+ } else {
+ swap_usage = mem_swap_usage - mem_usage;
+ }
+ // free swap is based on swap limit (upper bound) and swap usage
+ if (swap_usage >= swap_limit) {
+ value = 0; // free swap must not be negative
+ return true;
+ }
+ value = swap_limit - swap_usage;
+ return true;
+ }
+ }
+ // unlimited or not supported. Leave an appropriate trace message
+ if (log_is_enabled(Trace, os, container)) {
+ char mem_swap_buf[25]; // uint64_t => 20 + 1, 'unlimited' => 9 + 1; 10 < 21 < 25
+ char mem_limit_buf[25];
+ int num = 0;
+ if (mem_swap_limit == value_unlimited) {
+ num = os::snprintf(mem_swap_buf, sizeof(mem_swap_buf), "%s", "unlimited");
+ } else {
+ num = os::snprintf(mem_swap_buf, sizeof(mem_swap_buf), PHYS_MEM_TYPE_FORMAT, mem_swap_limit);
+ }
+ assert(num < 25, "buffer too small");
+ mem_swap_buf[num] = '\0';
+ if (mem_limit == value_unlimited) {
+ num = os::snprintf(mem_limit_buf, sizeof(mem_limit_buf), "%s", "unlimited");
+ } else {
+ num = os::snprintf(mem_limit_buf, sizeof(mem_limit_buf), PHYS_MEM_TYPE_FORMAT, mem_limit);
+ }
+ assert(num < 25, "buffer too small");
+ mem_limit_buf[num] = '\0';
+ log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
+ " container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
+ mem_swap_buf, mem_limit_buf, host_free_swap);
+ }
+ return false;
+}
+
+bool OSContainer::memory_and_swap_limit_in_bytes(physical_memory_size_type& value) {
+ assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
+ physical_memory_size_type phys_mem = os::Linux::physical_memory();
+ physical_memory_size_type host_swap = 0;
+ if (!os::Linux::host_swap(host_swap)) {
return false;
}
-
- value = mem_limit > mem_usage ? static_cast<julong>(mem_limit - mem_usage) : 0;
-
- return true;
+ return cgroup_subsystem->memory_and_swap_limit_in_bytes(phys_mem, host_swap, value);
}
-jlong OSContainer::memory_limit_in_bytes() {
+bool OSContainer::memory_and_swap_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
- return cgroup_subsystem->memory_limit_in_bytes(phys_mem);
+ physical_memory_size_type phys_mem = os::Linux::physical_memory();
+ physical_memory_size_type host_swap = 0;
+ if (!os::Linux::host_swap(host_swap)) {
+ return false;
+ }
+ return cgroup_subsystem->memory_and_swap_usage_in_bytes(phys_mem, host_swap, value);
}
-jlong OSContainer::memory_and_swap_limit_in_bytes() {
+bool OSContainer::memory_soft_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
- julong host_swap = os::Linux::host_swap();
- return cgroup_subsystem->memory_and_swap_limit_in_bytes(phys_mem, host_swap);
+ physical_memory_size_type phys_mem = os::Linux::physical_memory();
+ return cgroup_subsystem->memory_soft_limit_in_bytes(phys_mem, value);
}
-jlong OSContainer::memory_and_swap_usage_in_bytes() {
+bool OSContainer::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
- julong host_swap = os::Linux::host_swap();
- return cgroup_subsystem->memory_and_swap_usage_in_bytes(phys_mem, host_swap);
+ return cgroup_subsystem->memory_throttle_limit_in_bytes(value);
}
-jlong OSContainer::memory_soft_limit_in_bytes() {
+bool OSContainer::memory_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
- return cgroup_subsystem->memory_soft_limit_in_bytes(phys_mem);
+ return cgroup_subsystem->memory_usage_in_bytes(value);
}
-jlong OSContainer::memory_throttle_limit_in_bytes() {
+bool OSContainer::memory_max_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->memory_throttle_limit_in_bytes();
+ return cgroup_subsystem->memory_max_usage_in_bytes(value);
}
-jlong OSContainer::memory_usage_in_bytes() {
+bool OSContainer::rss_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->memory_usage_in_bytes();
+ return cgroup_subsystem->rss_usage_in_bytes(value);
}
-jlong OSContainer::memory_max_usage_in_bytes() {
+bool OSContainer::cache_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->memory_max_usage_in_bytes();
-}
-
-jlong OSContainer::rss_usage_in_bytes() {
- assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->rss_usage_in_bytes();
-}
-
-jlong OSContainer::cache_usage_in_bytes() {
- assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->cache_usage_in_bytes();
+ return cgroup_subsystem->cache_usage_in_bytes(value);
}
void OSContainer::print_version_specific_info(outputStream* st) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
+ physical_memory_size_type phys_mem = os::Linux::physical_memory();
cgroup_subsystem->print_version_specific_info(st, phys_mem);
}
@@ -187,50 +252,55 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}
-int OSContainer::active_processor_count() {
+bool OSContainer::active_processor_count(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->active_processor_count();
+ return cgroup_subsystem->active_processor_count(value);
}
-int OSContainer::cpu_quota() {
+bool OSContainer::cpu_quota(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->cpu_quota();
+ return cgroup_subsystem->cpu_quota(value);
}
-int OSContainer::cpu_period() {
+bool OSContainer::cpu_period(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->cpu_period();
+ return cgroup_subsystem->cpu_period(value);
}
-int OSContainer::cpu_shares() {
+bool OSContainer::cpu_shares(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->cpu_shares();
+ return cgroup_subsystem->cpu_shares(value);
}
-jlong OSContainer::cpu_usage_in_micros() {
+bool OSContainer::cpu_usage_in_micros(uint64_t& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->cpu_usage_in_micros();
+ return cgroup_subsystem->cpu_usage_in_micros(value);
}
-jlong OSContainer::pids_max() {
+bool OSContainer::pids_max(uint64_t& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->pids_max();
+ return cgroup_subsystem->pids_max(value);
}
-jlong OSContainer::pids_current() {
+bool OSContainer::pids_current(uint64_t& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
- return cgroup_subsystem->pids_current();
+ return cgroup_subsystem->pids_current(value);
}
-void OSContainer::print_container_helper(outputStream* st, jlong j, const char* metrics) {
+void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
st->print("%s: ", metrics);
- if (j >= 0) {
- if (j >= 1024) {
- st->print_cr(UINT64_FORMAT " k", uint64_t(j) / K);
+ if (res.success()) {
+ if (res.value() != value_unlimited) {
+ if (res.value() >= 1024) {
+ st->print_cr(PHYS_MEM_TYPE_FORMAT " k", (physical_memory_size_type)(res.value() / K));
+ } else {
+ st->print_cr(PHYS_MEM_TYPE_FORMAT, res.value());
+ }
} else {
- st->print_cr(UINT64_FORMAT, uint64_t(j));
+ st->print_cr("%s", "unlimited");
}
} else {
- st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
+ // Not supported
+ st->print_cr("%s", "unavailable");
}
}
diff --git a/src/hotspot/os/linux/osContainer_linux.hpp b/src/hotspot/os/linux/osContainer_linux.hpp
index 6258714c48b..895c99ba167 100644
--- a/src/hotspot/os/linux/osContainer_linux.hpp
+++ b/src/hotspot/os/linux/osContainer_linux.hpp
@@ -30,11 +30,30 @@
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
-#define OSCONTAINER_ERROR (-2)
+// Some cgroup interface files define the value 'max' for unlimited.
+// Define this constant value to indicate this value.
const uint64_t value_unlimited = std::numeric_limits<uint64_t>::max();
// 20ms timeout between re-reads of memory limit and _active_processor_count.
#define OSCONTAINER_CACHE_TIMEOUT (NANOSECS_PER_SEC/50)
+// Carrier object for print_container_helper()
+class MetricResult: public StackObj {
+ private:
+ static const uint64_t value_unused = 0;
+ bool _success = false;
+ physical_memory_size_type _value = value_unused;
+ public:
+ void set_value(physical_memory_size_type val) {
+ // having a value means success
+ _success = true;
+ _value = val;
+ }
+
+ bool success() { return _success; }
+ physical_memory_size_type value() { return _value; }
+};
+
class OSContainer: AllStatic {
private:
@@ -45,36 +64,38 @@ class OSContainer: AllStatic {
public:
static void init();
static void print_version_specific_info(outputStream* st);
- static void print_container_helper(outputStream* st, jlong j, const char* metrics);
+ static void print_container_helper(outputStream* st, MetricResult& res, const char* metrics);
static inline bool is_containerized();
static const char * container_type();
- static bool available_memory_in_container(julong& value);
- static jlong memory_limit_in_bytes();
- static jlong memory_and_swap_limit_in_bytes();
- static jlong memory_and_swap_usage_in_bytes();
- static jlong memory_soft_limit_in_bytes();
- static jlong memory_throttle_limit_in_bytes();
- static jlong memory_usage_in_bytes();
- static jlong memory_max_usage_in_bytes();
- static jlong rss_usage_in_bytes();
- static jlong cache_usage_in_bytes();
+ static bool available_memory_in_bytes(physical_memory_size_type& value);
+ static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
+ physical_memory_size_type& value);
+ static bool memory_limit_in_bytes(physical_memory_size_type& value);
+ static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
+ static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
+ static bool memory_soft_limit_in_bytes(physical_memory_size_type& value);
+ static bool memory_throttle_limit_in_bytes(physical_memory_size_type& value);
+ static bool memory_usage_in_bytes(physical_memory_size_type& value);
+ static bool memory_max_usage_in_bytes(physical_memory_size_type& value);
+ static bool rss_usage_in_bytes(physical_memory_size_type& value);
+ static bool cache_usage_in_bytes(physical_memory_size_type& value);
- static int active_processor_count();
+ static bool active_processor_count(int& value);
static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();
- static int cpu_quota();
- static int cpu_period();
+ static bool cpu_quota(int& value);
+ static bool cpu_period(int& value);
- static int cpu_shares();
+ static bool cpu_shares(int& value);
- static jlong cpu_usage_in_micros();
+ static bool cpu_usage_in_micros(uint64_t& value);
- static jlong pids_max();
- static jlong pids_current();
+ static bool pids_max(uint64_t& value);
+ static bool pids_current(uint64_t& value);
};
inline bool OSContainer::is_containerized() {
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 69ef8ce7c33..a345663dd5b 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -214,10 +214,8 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions
bool os::available_memory(physical_memory_size_type& value) {
- julong avail_mem = 0;
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_container(avail_mem)) {
- log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
- value = static_cast<physical_memory_size_type>(avail_mem);
+ if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
@@ -225,36 +223,38 @@ bool os::available_memory(physical_memory_size_type& value) {
}
bool os::Linux::available_memory(physical_memory_size_type& value) {
- julong avail_mem = static_cast<julong>(-1L);
+ physical_memory_size_type avail_mem = 0;
+ bool found_available_mem = false;
FILE *fp = os::fopen("/proc/meminfo", "r");
if (fp != nullptr) {
char buf[80];
do {
- if (fscanf(fp, "MemAvailable: " JULONG_FORMAT " kB", &avail_mem) == 1) {
+ if (fscanf(fp, "MemAvailable: " PHYS_MEM_TYPE_FORMAT " kB", &avail_mem) == 1) {
avail_mem *= K;
+ found_available_mem = true;
break;
}
} while (fgets(buf, sizeof(buf), fp) != nullptr);
fclose(fp);
}
- if (avail_mem == static_cast<julong>(-1L)) {
+ // Only enter the free memory block if we
+ // haven't found the available memory
+ if (!found_available_mem) {
physical_memory_size_type free_mem = 0;
if (!free_memory(free_mem)) {
return false;
}
- avail_mem = static_cast<julong>(free_mem);
+ avail_mem = free_mem;
}
- log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
- value = static_cast<physical_memory_size_type>(avail_mem);
+ log_trace(os)("available memory: " PHYS_MEM_TYPE_FORMAT, avail_mem);
+ value = avail_mem;
return true;
}
bool os::free_memory(physical_memory_size_type& value) {
- julong free_mem = 0;
- if (OSContainer::is_containerized() && OSContainer::available_memory_in_container(free_mem)) {
- log_trace(os)("free container memory: " JULONG_FORMAT, free_mem);
- value = static_cast<physical_memory_size_type>(free_mem);
+ if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+ log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}
@@ -269,29 +269,26 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
if (ret != 0) {
return false;
}
- julong free_mem = (julong)si.freeram * si.mem_unit;
- log_trace(os)("free memory: " JULONG_FORMAT, free_mem);
- value = static_cast<physical_memory_size_type>(free_mem);
+ physical_memory_size_type free_mem = (physical_memory_size_type)si.freeram * si.mem_unit;
+ log_trace(os)("free memory: " PHYS_MEM_TYPE_FORMAT, free_mem);
+ value = free_mem;
return true;
}
bool os::total_swap_space(physical_memory_size_type& value) {
if (OSContainer::is_containerized()) {
- jlong memory_and_swap_limit_in_bytes = OSContainer::memory_and_swap_limit_in_bytes();
- jlong memory_limit_in_bytes = OSContainer::memory_limit_in_bytes();
- if (memory_limit_in_bytes > 0 && memory_and_swap_limit_in_bytes > 0) {
- value = static_cast<physical_memory_size_type>(memory_and_swap_limit_in_bytes - memory_limit_in_bytes);
- return true;
+ physical_memory_size_type mem_swap_limit = value_unlimited;
+ physical_memory_size_type memory_limit = value_unlimited;
+ if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
+ OSContainer::memory_limit_in_bytes(memory_limit)) {
+ if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
+ mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
+ value = mem_swap_limit - memory_limit;
+ return true;
+ }
}
- } // fallback to the host swap space if the container did return the unbound value of -1
- struct sysinfo si;
- int ret = sysinfo(&si);
- if (ret != 0) {
- assert(false, "sysinfo failed in total_swap_space(): %s", os::strerror(errno));
- return false;
- }
- value = static_cast<physical_memory_size_type>(si.totalswap) * si.mem_unit;
- return true;
+ } // fallback to the host swap space if the container returned unlimited
+ return Linux::host_swap(value);
}
static bool host_free_swap_f(physical_memory_size_type& value) {
@@ -315,29 +312,12 @@ bool os::free_swap_space(physical_memory_size_type& value) {
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (OSContainer::is_containerized()) {
- jlong mem_swap_limit = OSContainer::memory_and_swap_limit_in_bytes();
- jlong mem_limit = OSContainer::memory_limit_in_bytes();
- if (mem_swap_limit >= 0 && mem_limit >= 0) {
- jlong delta_limit = mem_swap_limit - mem_limit;
- if (delta_limit <= 0) {
- value = 0;
- return true;
- }
- jlong mem_swap_usage = OSContainer::memory_and_swap_usage_in_bytes();
- jlong mem_usage = OSContainer::memory_usage_in_bytes();
- if (mem_swap_usage > 0 && mem_usage > 0) {
- jlong delta_usage = mem_swap_usage - mem_usage;
- if (delta_usage >= 0) {
- jlong free_swap = delta_limit - delta_usage;
- value = free_swap >= 0 ? static_cast<physical_memory_size_type>(free_swap) : 0;
- return true;
- }
- }
+ if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
+ return true;
}
- // unlimited or not supported. Fall through to return host value
- log_trace(os,container)("os::free_swap_space: container_swap_limit=" JLONG_FORMAT
- " container_mem_limit=" JLONG_FORMAT " returning host value: " PHYS_MEM_TYPE_FORMAT,
- mem_swap_limit, mem_limit, host_free_swap_val);
+ // Fall through to use host value
+ log_trace(os,container)("os::free_swap_space: containerized value unavailable"
+ " returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
value = host_free_swap_val;
return true;
@@ -345,10 +325,10 @@ bool os::free_swap_space(physical_memory_size_type& value) {
physical_memory_size_type os::physical_memory() {
if (OSContainer::is_containerized()) {
- jlong mem_limit;
- if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
- log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
- return static_cast<physical_memory_size_type>(mem_limit);
+ physical_memory_size_type mem_limit = value_unlimited;
+ if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
+ log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
+ return mem_limit;
}
}
@@ -508,10 +488,15 @@ pid_t os::Linux::gettid() {
// Returns the amount of swap currently configured, in bytes.
// This can change at any time.
-julong os::Linux::host_swap() {
+bool os::Linux::host_swap(physical_memory_size_type& value) {
struct sysinfo si;
- sysinfo(&si);
- return (julong)(si.totalswap * si.mem_unit);
+ int ret = sysinfo(&si);
+ if (ret != 0) {
+ assert(false, "sysinfo failed in host_swap(): %s", os::strerror(errno));
+ return false;
+ }
+ value = static_cast<physical_memory_size_type>(si.totalswap) * si.mem_unit;
+ return true;
}
// Most versions of linux have a bug where the number of processors are
@@ -2469,9 +2454,11 @@ bool os::Linux::print_container_info(outputStream* st) {
st->print_cr("cpu_memory_nodes: %s", p != nullptr ? p : "not supported");
free(p);
- int i = OSContainer::active_processor_count();
+ int i = -1;
+ bool supported = OSContainer::active_processor_count(i);
st->print("active_processor_count: ");
- if (i > 0) {
+ if (supported) {
+ assert(i > 0, "must be");
if (ActiveProcessorCount > 0) {
st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
} else {
@@ -2481,65 +2468,105 @@ bool os::Linux::print_container_info(outputStream* st) {
st->print_cr("not supported");
}
- i = OSContainer::cpu_quota();
+
+ supported = OSContainer::cpu_quota(i);
st->print("cpu_quota: ");
- if (i > 0) {
+ if (supported && i > 0) {
st->print_cr("%d", i);
} else {
- st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota");
+ st->print_cr("%s", !supported ? "not supported" : "no quota");
}
- i = OSContainer::cpu_period();
+ supported = OSContainer::cpu_period(i);
st->print("cpu_period: ");
- if (i > 0) {
+ if (supported && i > 0) {
st->print_cr("%d", i);
} else {
- st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period");
+ st->print_cr("%s", !supported ? "not supported" : "no period");
}
- i = OSContainer::cpu_shares();
+ supported = OSContainer::cpu_shares(i);
st->print("cpu_shares: ");
- if (i > 0) {
+ if (supported && i > 0) {
st->print_cr("%d", i);
} else {
- st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares");
+ st->print_cr("%s", !supported ? "not supported" : "no shares");
}
- jlong j = OSContainer::cpu_usage_in_micros();
+ uint64_t j = 0;
+ supported = OSContainer::cpu_usage_in_micros(j);
st->print("cpu_usage_in_micros: ");
- if (j >= 0) {
- st->print_cr(JLONG_FORMAT, j);
+ if (supported && j > 0) {
+ st->print_cr(UINT64_FORMAT, j);
} else {
- st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "no usage");
+ st->print_cr("%s", !supported ? "not supported" : "no usage");
}
- OSContainer::print_container_helper(st, OSContainer::memory_limit_in_bytes(), "memory_limit_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::memory_and_swap_limit_in_bytes(), "memory_and_swap_limit_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::memory_soft_limit_in_bytes(), "memory_soft_limit_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::memory_throttle_limit_in_bytes(), "memory_throttle_limit_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::memory_usage_in_bytes(), "memory_usage_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::memory_max_usage_in_bytes(), "memory_max_usage_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::rss_usage_in_bytes(), "rss_usage_in_bytes");
- OSContainer::print_container_helper(st, OSContainer::cache_usage_in_bytes(), "cache_usage_in_bytes");
+ MetricResult memory_limit;
+ physical_memory_size_type val = value_unlimited;
+ if (OSContainer::memory_limit_in_bytes(val)) {
+ memory_limit.set_value(val);
+ }
+ MetricResult mem_swap_limit;
+ val = value_unlimited;
+ if (OSContainer::memory_and_swap_limit_in_bytes(val)) {
+ mem_swap_limit.set_value(val);
+ }
+ MetricResult mem_soft_limit;
+ val = value_unlimited;
+ if (OSContainer::memory_soft_limit_in_bytes(val)) {
+ mem_soft_limit.set_value(val);
+ }
+ MetricResult mem_throttle_limit;
+ val = value_unlimited;
+ if (OSContainer::memory_throttle_limit_in_bytes(val)) {
+ mem_throttle_limit.set_value(val);
+ }
+ MetricResult mem_usage;
+ val = 0;
+ if (OSContainer::memory_usage_in_bytes(val)) {
+ mem_usage.set_value(val);
+ }
+ MetricResult mem_max_usage;
+ val = 0;
+ if (OSContainer::memory_max_usage_in_bytes(val)) {
+ mem_max_usage.set_value(val);
+ }
+ MetricResult rss_usage;
+ val = 0;
+ if (OSContainer::rss_usage_in_bytes(val)) {
+ rss_usage.set_value(val);
+ }
+ MetricResult cache_usage;
+ val = 0;
+ if (OSContainer::cache_usage_in_bytes(val)) {
+ cache_usage.set_value(val);
+ }
+ OSContainer::print_container_helper(st, memory_limit, "memory_limit_in_bytes");
+ OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit_in_bytes");
+ OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit_in_bytes");
+ OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit_in_bytes");
+ OSContainer::print_container_helper(st, mem_usage, "memory_usage_in_bytes");
+ OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage_in_bytes");
+ OSContainer::print_container_helper(st, rss_usage, "rss_usage_in_bytes");
+ OSContainer::print_container_helper(st, cache_usage, "cache_usage_in_bytes");
OSContainer::print_version_specific_info(st);
- j = OSContainer::pids_max();
+ supported = OSContainer::pids_max(j);
st->print("maximum number of tasks: ");
- if (j > 0) {
- st->print_cr(JLONG_FORMAT, j);
+ if (supported && j != value_unlimited) {
+ st->print_cr(UINT64_FORMAT, j);
} else {
- st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
+ st->print_cr("%s", !supported ? "not supported" : "unlimited");
}
- j = OSContainer::pids_current();
+ supported = OSContainer::pids_current(j);
st->print("current number of tasks: ");
- if (j > 0) {
- st->print_cr(JLONG_FORMAT, j);
+ if (supported && j > 0) {
+ st->print_cr(UINT64_FORMAT, j);
} else {
- if (j == OSCONTAINER_ERROR) {
- st->print_cr("not supported");
- }
+ st->print_cr("%s", !supported ? "not supported" : "no current tasks");
}
return true;
@@ -4643,7 +4670,7 @@ int os::Linux::active_processor_count() {
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN)
-// 3. extracted from cgroup cpu subsystem (shares and quotas)
+// 3. extracted from cgroup cpu subsystem (quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
@@ -4660,9 +4687,8 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}
- int active_cpus;
- if (OSContainer::is_containerized()) {
- active_cpus = OSContainer::active_processor_count();
+ int active_cpus = -1;
+ if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index b77cd9f3c81..df96a17d8e9 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -45,8 +45,6 @@ class os::Linux {
static GrowableArray<int>* _cpu_to_node;
static GrowableArray<int>* _nindex_to_node;
- static julong available_memory_in_container();
-
protected:
static physical_memory_size_type _physical_memory;
@@ -117,7 +115,7 @@ class os::Linux {
static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }
static physical_memory_size_type physical_memory() { return _physical_memory; }
- static julong host_swap();
+ static bool host_swap(physical_memory_size_type& value);
static intptr_t* ucontext_get_sp(const ucontext_t* uc);
static intptr_t* ucontext_get_fp(const ucontext_t* uc);
diff --git a/src/hotspot/share/cds/aotArtifactFinder.hpp b/src/hotspot/share/cds/aotArtifactFinder.hpp
index 405222a8753..05bcde6b0ac 100644
--- a/src/hotspot/share/cds/aotArtifactFinder.hpp
+++ b/src/hotspot/share/cds/aotArtifactFinder.hpp
@@ -39,7 +39,7 @@ class TypeArrayKlass;
// It also decides what Klasses must be cached in aot-initialized state.
//
// ArchiveBuilder uses [1] as roots to scan for all MetaspaceObjs that need to be cached.
-// ArchiveHeapWriter uses [2] to create an image of the archived heap.
+// HeapShared uses [2] to create an image of the archived heap.
//
// [1] is stored in _all_cached_classes in aotArtifactFinder.cpp.
// [2] is stored in HeapShared::archived_object_cache().
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index d0a63c56093..a252eae4b84 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -796,7 +796,7 @@ void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_i
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
- address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+ address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
log_region_range("heap", buffer_start, buffer_end, requested_start);
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.cpp b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
index ff9319d266b..98f400c989c 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.cpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.cpp
@@ -55,7 +55,7 @@
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
-// The following are offsets from buffer_bottom()
+bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
size_t AOTMappedHeapWriter::_buffer_used;
// Heap root segments
@@ -74,7 +74,7 @@ AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
- size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
+ size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
127, // prime number
AnyObj::C_HEAP,
@@ -96,6 +96,45 @@ void AOTMappedHeapWriter::init() {
_source_objs = new GrowableArrayCHeap(10000);
guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
+
+ if (CDSConfig::old_cds_flags_used()) {
+ // With the old CDS workflow, we can guarantee deterministic output: given
+ // the same classlist file, we can generate the same static CDS archive.
+ // To ensure determinism, we always use the same compressed oop encoding
+ // (zero-based, no shift). See set_requested_address_range().
+ _is_writing_deterministic_heap = true;
+ } else {
+ // Deterministic output is not supported by the new AOT workflow, so
+ // we don't force the (zero-based, no shift) encoding. This way, it is more
+ // likely that we can avoid oop relocation in the production run.
+ _is_writing_deterministic_heap = false;
+ }
+ }
+}
+
+// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see comments
+// in AOTMappedHeapWriter::set_requested_address_range().
+CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
+ if (is_writing_deterministic_heap()) {
+ return CompressedOops::UnscaledNarrowOop;
+ } else {
+ return CompressedOops::mode();
+ }
+}
+
+address AOTMappedHeapWriter::narrow_oop_base() {
+ if (is_writing_deterministic_heap()) {
+ return (address)0;
+ } else {
+ return CompressedOops::base();
+ }
+}
+
+int AOTMappedHeapWriter::narrow_oop_shift() {
+ if (is_writing_deterministic_heap()) {
+ return 0;
+ } else {
+ return CompressedOops::shift();
}
}
@@ -116,7 +155,7 @@ void AOTMappedHeapWriter::write(GrowableArrayCHeap* roots,
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
- set_requested_address(heap_info);
+ set_requested_address_range(heap_info);
relocate_embedded_oops(roots, heap_info);
}
@@ -536,14 +575,55 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
return buffered_obj_offset;
}
-void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
+// Set the range [_requested_bottom, _requested_top), the requested address range of all
+// the archived heap objects in the production run.
+//
+// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
+//
+// The archived objects are stored using the COOPS encoding of the assembly phase.
+// We pick a range within the heap used by the assembly phase.
+//
+// In the production run, if different COOPS encodings are used:
+// - The heap contents needs to be relocated.
+//
+// (2) UseCompressedOops == true && is_writing_deterministic_heap()
+//
+// We always use zero-based, zero-shift encoding. _requested_top is aligned to 0x10000000.
+//
+// (3) UseCompressedOops == false:
+//
+// In the production run, the heap range is usually picked (randomly) by the OS, so we
+// will almost always need to perform relocation, regardless of how we pick the requested
+// address range.
+//
+// So we just hard code it to NOCOOPS_REQUESTED_BASE.
+//
+void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must archived at least one object!");
if (UseCompressedOops) {
- if (UseG1GC) {
+ if (is_writing_deterministic_heap()) {
+ // Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
+ // We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
+ // heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
+ // requested location to avoid relocation.
+ //
+ // For other collectors or larger heaps, relocation is unavoidable, but is usually
+ // quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
+ address heap_end = (address)0x100000000;
+ size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
+ if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
+ log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
+ AOTMetaspace::unrecoverable_writing_error();
+ }
+ _requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
+ } else if (UseG1GC) {
+ // For G1, pick the range at the top of the current heap. If the exact same heap sizes
+ // are used in the production run, it's likely that we can map the archived objects
+ // at the requested location to avoid relocation.
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(aot, heap)("Heap end = %p", heap_end);
_requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
@@ -612,7 +692,14 @@ oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
template void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
oop request_referent = source_obj_to_requested_obj(source_referent);
- store_requested_oop_in_buffer(field_addr_in_buffer, request_referent);
+ if (UseCompressedOops && is_writing_deterministic_heap()) {
+ // We use zero-based, 0-shift encoding, so the narrowOop is just the lower
+ // 32 bits of request_referent
+ intptr_t addr = cast_from_oop(request_referent);
+ *((narrowOop*)field_addr_in_buffer) = checked_cast(addr);
+ } else {
+ store_requested_oop_in_buffer(field_addr_in_buffer, request_referent);
+ }
if (request_referent != nullptr) {
mark_oop_pointer(field_addr_in_buffer, oopmap);
}
@@ -918,9 +1005,9 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
address buffer_start = address(r.start());
address buffer_end = address(r.end());
- address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
- address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
- int requested_shift = CompressedOops::shift();
+ address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+ address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
+ int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {
diff --git a/src/hotspot/share/cds/aotMappedHeapWriter.hpp b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
index 9a85b83d3d1..eafd38ac8bb 100644
--- a/src/hotspot/share/cds/aotMappedHeapWriter.hpp
+++ b/src/hotspot/share/cds/aotMappedHeapWriter.hpp
@@ -29,6 +29,7 @@
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
+#include "oops/compressedOops.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
@@ -71,7 +72,7 @@ class AOTMappedHeapWriter : AllStatic {
// These are entered into HeapShared::archived_object_cache().
//
// - "buffered objects" are copies of the "source objects", and are stored in into
- // ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
+ // AOTMappedHeapWriter::_buffer, which is a GrowableArray that sits outside of
// the valid heap range. Therefore we avoid using the addresses of these copies
// as oops. They are usually called "buffered_addr" in the code (of the type "address").
//
@@ -81,26 +82,11 @@ class AOTMappedHeapWriter : AllStatic {
// - Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
- // The requested address is implemented differently depending on UseCompressedOops:
+ // The requested address of an archived object is essentially its buffered_addr + delta,
+ // where delta is (_requested_bottom - buffer_bottom());
//
- // UseCompressedOops == true:
- // The archived objects are stored assuming that the runtime COOPS compression
- // scheme is exactly the same as in dump time (or else a more expensive runtime relocation
- // would be needed.)
- //
- // At dump time, we assume that the runtime heap range is exactly the same as
- // in dump time. The requested addresses of the archived objects are chosen such that
- // they would occupy the top end of a G1 heap (TBD when dumping is supported by other
- // collectors. See JDK-8298614).
- //
- // UseCompressedOops == false:
- // At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
- // need to perform relocation. Hence, the goal of the "requested address" is to ensure that
- // the contents of the archived objects are deterministic. I.e., the oop fields of archived
- // objects will always point to deterministic addresses.
- //
- // For G1, the archived heap is written such that the lowest archived object is placed
- // at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614).
+ // The requested addresses of all archived objects are within [_requested_bottom, _requested_top).
+ // See AOTMappedHeapWriter::set_requested_address_range() for more info.
// ----------------------------------------------------------------------
public:
@@ -111,6 +97,15 @@ public:
// Shenandoah heap region size can never be smaller than 256K.
static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K;
+ // The heap contents are required to be deterministic when dumping "old" CDS archives, in order
+ // to support reproducible lib/server/classes*.jsa when building the JDK.
+ static bool is_writing_deterministic_heap() { return _is_writing_deterministic_heap; }
+
+ // The oop encoding used by the archived heap objects.
+ static CompressedOops::Mode narrow_oop_mode();
+ static address narrow_oop_base();
+ static int narrow_oop_shift();
+
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
@@ -121,6 +116,7 @@ private:
int _field_offset;
};
+ static bool _is_writing_deterministic_heap;
static GrowableArrayCHeap* _buffer;
// The number of bytes that have written into _buffer (may be smaller than _buffer->length()).
@@ -130,15 +126,15 @@ private:
static HeapRootSegments _heap_root_segments;
// The address range of the requested location of the archived heap objects.
- static address _requested_bottom;
- static address _requested_top;
+ static address _requested_bottom; // The requested address of the lowest archived heap object
+ static address _requested_top; // The exclusive end of the highest archived heap object
static GrowableArrayCHeap* _native_pointers;
static GrowableArrayCHeap* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
- // See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
+ // See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
// The objects will be written in the order of:
//_source_objs->at(_source_objs_order->at(0)._index)
// source_objs->at(_source_objs_order->at(1)._index)
@@ -200,7 +196,7 @@ private:
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
- static void set_requested_address(ArchiveMappedHeapInfo* info);
+ static void set_requested_address_range(ArchiveMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
static void relocate_embedded_oops(GrowableArrayCHeap* roots, ArchiveMappedHeapInfo* info);
static void compute_ptrmap(ArchiveMappedHeapInfo *info);
diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp
index 8642b1a6de8..42d41e6ae89 100644
--- a/src/hotspot/share/cds/aotMetaspace.cpp
+++ b/src/hotspot/share/cds/aotMetaspace.cpp
@@ -114,6 +114,7 @@ intx AOTMetaspace::_relocation_delta;
char* AOTMetaspace::_requested_base_address;
Array* AOTMetaspace::_archived_method_handle_intrinsics = nullptr;
bool AOTMetaspace::_use_optimized_module_handling = true;
+FileMapInfo* AOTMetaspace::_output_mapinfo = nullptr;
// The CDS archive is divided into the following regions:
// rw - read-write metadata
@@ -322,6 +323,24 @@ void AOTMetaspace::initialize_for_static_dump() {
AOTMetaspace::unrecoverable_writing_error();
}
_symbol_region.init(&_symbol_rs, &_symbol_vs);
+ if (CDSConfig::is_dumping_preimage_static_archive()) {
+ // We are in the AOT training run. User code is executed.
+ //
+ // On Windows, if the user code closes System.out and we open the AOT config file for output
+ // only at VM exit, we might get back the same file HANDLE as stdout, and the AOT config
+ // file may get corrupted by UL logs. By opening early, we ensure that the output
+ // HANDLE is different than stdout so we can avoid such corruption.
+ open_output_mapinfo();
+ } else {
+ // No need for the above as we won't execute any user code.
+ }
+}
+
+void AOTMetaspace::open_output_mapinfo() {
+ const char* static_archive = CDSConfig::output_archive_path();
+ assert(static_archive != nullptr, "sanity");
+ _output_mapinfo = new FileMapInfo(static_archive, true);
+ _output_mapinfo->open_as_output();
}
// Called by universe_post_init()
@@ -655,15 +674,14 @@ private:
public:
- VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b) :
- VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(nullptr), _builder(b) {}
+ VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b, FileMapInfo* map_info) :
+ VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(map_info), _builder(b) {}
bool skip_operation() const { return false; }
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
- FileMapInfo* map_info() const { return _map_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace
@@ -795,12 +813,6 @@ void VM_PopulateDumpSharedSpace::doit() {
CppVtables::zero_archived_vtables();
// Write the archive file
- if (CDSConfig::is_dumping_final_static_archive()) {
- FileMapInfo::free_current_info(); // FIXME: should not free current info
- }
- const char* static_archive = CDSConfig::output_archive_path();
- assert(static_archive != nullptr, "sanity");
- _map_info = new FileMapInfo(static_archive, true);
_map_info->populate_header(AOTMetaspace::core_region_alignment());
_map_info->set_early_serialized_data(early_serialized_data);
_map_info->set_serialized_data(serialized_data);
@@ -1138,7 +1150,14 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
}
#endif
- VM_PopulateDumpSharedSpace op(builder);
+ if (!CDSConfig::is_dumping_preimage_static_archive()) {
+ if (CDSConfig::is_dumping_final_static_archive()) {
+ FileMapInfo::free_current_info(); // FIXME: should not free current info
+ }
+ open_output_mapinfo();
+ }
+
+ VM_PopulateDumpSharedSpace op(builder, _output_mapinfo);
VMThread::execute(&op);
if (AOTCodeCache::is_on_for_dump() && CDSConfig::is_dumping_final_static_archive()) {
@@ -1152,7 +1171,9 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::disable_dumping_aot_code();
}
- bool status = write_static_archive(&builder, op.map_info(), op.mapped_heap_info(), op.streamed_heap_info());
+ bool status = write_static_archive(&builder, _output_mapinfo, op.mapped_heap_info(), op.streamed_heap_info());
+ assert(!_output_mapinfo->is_open(), "Must be closed already");
+ _output_mapinfo = nullptr;
if (status && CDSConfig::is_dumping_preimage_static_archive()) {
tty->print_cr("%s AOTConfiguration recorded: %s",
CDSConfig::has_temp_aot_config_file() ? "Temporary" : "", AOTConfiguration);
@@ -1173,11 +1194,10 @@ bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();
-
- map_info->open_as_output();
if (!map_info->is_open()) {
return false;
}
+ map_info->prepare_for_writing();
builder->write_archive(map_info, mapped_heap_info, streamed_heap_info);
return true;
}
diff --git a/src/hotspot/share/cds/aotMetaspace.hpp b/src/hotspot/share/cds/aotMetaspace.hpp
index bfd9f4bcc75..1712a7865ad 100644
--- a/src/hotspot/share/cds/aotMetaspace.hpp
+++ b/src/hotspot/share/cds/aotMetaspace.hpp
@@ -60,6 +60,7 @@ class AOTMetaspace : AllStatic {
static char* _requested_base_address;
static bool _use_optimized_module_handling;
static Array* _archived_method_handle_intrinsics;
+ static FileMapInfo* _output_mapinfo;
public:
enum {
@@ -185,6 +186,7 @@ public:
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static void fork_and_dump_final_static_archive(TRAPS);
+ static void open_output_mapinfo();
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,
diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp
index 7976f690b8b..86533e212d8 100644
--- a/src/hotspot/share/cds/cdsConfig.cpp
+++ b/src/hotspot/share/cds/cdsConfig.cpp
@@ -526,7 +526,7 @@ void CDSConfig::check_aotmode_record() {
bool has_output = !FLAG_IS_DEFAULT(AOTCacheOutput);
if (!has_output && !has_config) {
- vm_exit_during_initialization("At least one of AOTCacheOutput and AOTConfiguration must be specified when using -XX:AOTMode=record");
+ vm_exit_during_initialization("At least one of AOTCacheOutput and AOTConfiguration must be specified when using -XX:AOTMode=record");
}
if (has_output) {
diff --git a/src/hotspot/share/cds/dynamicArchive.cpp b/src/hotspot/share/cds/dynamicArchive.cpp
index 85e59e23f8c..8fae8dabf8c 100644
--- a/src/hotspot/share/cds/dynamicArchive.cpp
+++ b/src/hotspot/share/cds/dynamicArchive.cpp
@@ -353,6 +353,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_as_output();
+ dynamic_info->prepare_for_writing();
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr);
address base = _requested_dynamic_archive_bottom;
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index ae92ce31058..0eeb96bb269 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -216,12 +216,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_obj_alignment = ObjectAlignmentInBytes;
_compact_strings = CompactStrings;
_compact_headers = UseCompactObjectHeaders;
+#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
_object_streaming_mode = HeapShared::is_writing_streaming_mode();
- _narrow_oop_mode = CompressedOops::mode();
- _narrow_oop_base = CompressedOops::base();
- _narrow_oop_shift = CompressedOops::shift();
+ _narrow_oop_mode = AOTMappedHeapWriter::narrow_oop_mode();
+ _narrow_oop_base = AOTMappedHeapWriter::narrow_oop_base();
+ _narrow_oop_shift = AOTMappedHeapWriter::narrow_oop_shift();
}
+#endif
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
if (UseCompressedClassPointers) {
@@ -777,7 +779,9 @@ void FileMapInfo::open_as_output() {
}
_fd = fd;
_file_open = true;
+}
+void FileMapInfo::prepare_for_writing() {
// Seek past the header. We will write the header after all regions are written
// and their CRCs computed.
size_t header_bytes = header()->header_size();
@@ -911,7 +915,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
if (HeapShared::is_writing_mapping_mode()) {
requested_base = (char*)AOTMappedHeapWriter::requested_address();
if (UseCompressedOops) {
- mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
+ mapping_offset = (size_t)((address)requested_base - AOTMappedHeapWriter::narrow_oop_base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
}
} else {
diff --git a/src/hotspot/share/cds/filemap.hpp b/src/hotspot/share/cds/filemap.hpp
index b97b46a7c26..fbd3c8e1681 100644
--- a/src/hotspot/share/cds/filemap.hpp
+++ b/src/hotspot/share/cds/filemap.hpp
@@ -290,7 +290,7 @@ public:
void log_paths(const char* msg, int start_idx, int end_idx);
- FileMapInfo(const char* full_apth, bool is_static);
+ FileMapInfo(const char* full_path, bool is_static);
~FileMapInfo();
static void free_current_info();
@@ -365,6 +365,7 @@ public:
// File manipulation.
bool open_as_input() NOT_CDS_RETURN_(false);
void open_as_output();
+ void prepare_for_writing();
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp
index 357b317ee49..f2382289c7d 100644
--- a/src/hotspot/share/cds/heapShared.cpp
+++ b/src/hotspot/share/cds/heapShared.cpp
@@ -631,9 +631,8 @@ void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
}
// Given java_mirror that represents a (primitive or reference) type T,
-// return the "scratch" version that represents the same type T.
-// Note that if java_mirror will be returned if it's already a
-// scratch mirror.
+// return the "scratch" version that represents the same type T. Note
+// that java_mirror will be returned if the mirror is already a scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp
index 2c782f7231b..118c60faa60 100644
--- a/src/hotspot/share/cds/heapShared.hpp
+++ b/src/hotspot/share/cds/heapShared.hpp
@@ -332,7 +332,7 @@ public:
// Used by CDSHeapVerifier.
OopHandle _orig_referrer;
- // The location of this object inside ArchiveHeapWriter::_buffer
+ // The location of this object inside {AOTMappedHeapWriter, AOTStreamedHeapWriter}::_buffer
size_t _buffer_offset;
// One or more fields in this object are pointing to non-null oops.
diff --git a/src/hotspot/share/ci/ciField.cpp b/src/hotspot/share/ci/ciField.cpp
index d47c4c508d7..19e05784f4d 100644
--- a/src/hotspot/share/ci/ciField.cpp
+++ b/src/hotspot/share/ci/ciField.cpp
@@ -216,9 +216,6 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
- if (holder->name() == ciSymbols::java_lang_System())
- // Never trust strangely unstable finals: System.out, etc.
- return false;
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||
@@ -230,15 +227,9 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// can't be serialized, so there is no hacking of finals going on with them.
if (holder->is_hidden())
return true;
- // Trust final fields in all boxed classes
- if (holder->is_box_klass())
- return true;
// Trust final fields in records
if (holder->is_record())
return true;
- // Trust final fields in String
- if (holder->name() == ciSymbols::java_lang_String())
- return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||
@@ -267,17 +258,7 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// not be constant is when the field is a *special* static & final field
// whose value may change. The three examples are java.lang.System.in,
// java.lang.System.out, and java.lang.System.err.
- assert(vmClasses::System_klass() != nullptr, "Check once per vm");
- if (k == vmClasses::System_klass()) {
- // Check offsets for case 2: System.in, System.out, or System.err
- if (_offset == java_lang_System::in_offset() ||
- _offset == java_lang_System::out_offset() ||
- _offset == java_lang_System::err_offset()) {
- _is_constant = false;
- return;
- }
- }
- _is_constant = true;
+ _is_constant = !fd->is_mutable_static_final();
} else {
// An instance field can be constant if it's a final static field or if
// it's a final non-static field of a trusted class (classes in
diff --git a/src/hotspot/share/ci/ciInstanceKlass.cpp b/src/hotspot/share/ci/ciInstanceKlass.cpp
index 9bbf005356c..64b9acf9146 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp
@@ -605,7 +605,7 @@ bool ciInstanceKlass::is_leaf_type() {
if (is_shared()) {
return is_final(); // approximately correct
} else {
- return !has_subklass() && (nof_implementors() == 0);
+ return !has_subklass() && (!is_interface() || nof_implementors() == 0);
}
}
@@ -619,6 +619,7 @@ bool ciInstanceKlass::is_leaf_type() {
// This is OK, since any dependencies we decide to assert
// will be checked later under the Compile_lock.
ciInstanceKlass* ciInstanceKlass::implementor() {
+ assert(is_interface(), "required");
ciInstanceKlass* impl = _implementor;
if (impl == nullptr) {
if (is_shared()) {
diff --git a/src/hotspot/share/ci/ciInstanceKlass.hpp b/src/hotspot/share/ci/ciInstanceKlass.hpp
index ec8fc789c7d..1f887771f54 100644
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp
@@ -259,6 +259,7 @@ public:
ciInstanceKlass* unique_implementor() {
assert(is_loaded(), "must be loaded");
+ assert(is_interface(), "must be");
ciInstanceKlass* impl = implementor();
return (impl != this ? impl : nullptr);
}
diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp
index 082c745f4c3..12fbda899b9 100644
--- a/src/hotspot/share/classfile/classLoader.cpp
+++ b/src/hotspot/share/classfile/classLoader.cpp
@@ -412,31 +412,30 @@ ClassFileStream* ClassPathImageEntry::open_stream(JavaThread* current, const cha
//
ClassFileStream* ClassPathImageEntry::open_stream_for_loader(JavaThread* current, const char* name, ClassLoaderData* loader_data) {
jlong size;
- JImageLocationRef location = (*JImageFindResource)(jimage_non_null(), "", get_jimage_version_string(), name, &size);
+ JImageLocationRef location = 0;
- if (location == 0) {
- TempNewSymbol class_name = SymbolTable::new_symbol(name);
- TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
+ TempNewSymbol class_name = SymbolTable::new_symbol(name);
+ TempNewSymbol pkg_name = ClassLoader::package_from_class_name(class_name);
- if (pkg_name != nullptr) {
- if (!Universe::is_module_initialized()) {
- location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
- } else {
- PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
- if (package_entry != nullptr) {
- ResourceMark rm(current);
- // Get the module name
- ModuleEntry* module = package_entry->module();
- assert(module != nullptr, "Boot classLoader package missing module");
- assert(module->is_named(), "Boot classLoader package is in unnamed module");
- const char* module_name = module->name()->as_C_string();
- if (module_name != nullptr) {
- location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
- }
+ if (pkg_name != nullptr) {
+ if (!Universe::is_module_initialized()) {
+ location = (*JImageFindResource)(jimage_non_null(), JAVA_BASE_NAME, get_jimage_version_string(), name, &size);
+ } else {
+ PackageEntry* package_entry = ClassLoader::get_package_entry(pkg_name, loader_data);
+ if (package_entry != nullptr) {
+ ResourceMark rm(current);
+ // Get the module name
+ ModuleEntry* module = package_entry->module();
+ assert(module != nullptr, "Boot classLoader package missing module");
+ assert(module->is_named(), "Boot classLoader package is in unnamed module");
+ const char* module_name = module->name()->as_C_string();
+ if (module_name != nullptr) {
+ location = (*JImageFindResource)(jimage_non_null(), module_name, get_jimage_version_string(), name, &size);
}
}
}
}
+
if (location != 0) {
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(size);
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index e41af702601..ee80dbbc45c 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -1241,10 +1241,7 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
if (!k->is_array_klass()) {
// - local static final fields with initial values were initialized at dump time
-
- // create the init_lock
- typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
- set_init_lock(mirror(), r);
+ assert(init_lock(mirror()) != nullptr, "allocated during AOT assembly");
if (protection_domain.not_null()) {
set_protection_domain(mirror(), protection_domain());
@@ -1336,11 +1333,6 @@ void java_lang_Class::set_class_data(oop java_class, oop class_data) {
java_class->obj_field_put(_classData_offset, class_data);
}
-void java_lang_Class::set_reflection_data(oop java_class, oop reflection_data) {
- assert(_reflectionData_offset != 0, "must be set");
- java_class->obj_field_put(_reflectionData_offset, reflection_data);
-}
-
void java_lang_Class::set_class_loader(oop java_class, oop loader) {
assert(_class_loader_offset != 0, "offsets should have been initialized");
java_class->obj_field_put(_class_loader_offset, loader);
@@ -1483,7 +1475,6 @@ Klass* java_lang_Class::array_klass_acquire(oop java_class) {
return k;
}
-
void java_lang_Class::release_set_array_klass(oop java_class, Klass* klass) {
assert(klass->is_klass() && klass->is_array_klass(), "should be array klass");
java_class->release_metadata_field_put(_array_klass_offset, klass);
@@ -1589,11 +1580,6 @@ void java_lang_Class::set_modifiers(oop the_class_mirror, u2 value) {
the_class_mirror->char_field_put(_modifiers_offset, value);
}
-int java_lang_Class::raw_access_flags(oop the_class_mirror) {
- assert(_raw_access_flags_offset != 0, "offsets should have been initialized");
- return the_class_mirror->char_field(_raw_access_flags_offset);
-}
-
void java_lang_Class::set_raw_access_flags(oop the_class_mirror, u2 value) {
assert(_raw_access_flags_offset != 0, "offsets should have been initialized");
the_class_mirror->char_field_put(_raw_access_flags_offset, value);
diff --git a/src/hotspot/share/classfile/javaClasses.hpp b/src/hotspot/share/classfile/javaClasses.hpp
index 28f8c0a3b8c..699dd39b887 100644
--- a/src/hotspot/share/classfile/javaClasses.hpp
+++ b/src/hotspot/share/classfile/javaClasses.hpp
@@ -273,6 +273,12 @@ class java_lang_Class : AllStatic {
static void initialize_mirror_fields(InstanceKlass* ik, Handle mirror, Handle protection_domain,
Handle classData, TRAPS);
static void set_mirror_module_field(JavaThread* current, Klass* K, Handle mirror, Handle module);
+
+ static void set_modifiers(oop java_class, u2 value);
+ static void set_raw_access_flags(oop java_class, u2 value);
+ static void set_is_primitive(oop java_class);
+ static void release_set_array_klass(oop java_class, Klass* klass);
+
public:
static void allocate_fixup_lists();
static void compute_offsets();
@@ -307,12 +313,10 @@ class java_lang_Class : AllStatic {
static bool is_instance(oop obj);
static bool is_primitive(oop java_class);
- static void set_is_primitive(oop java_class);
static BasicType primitive_type(oop java_class);
static oop primitive_mirror(BasicType t);
- // JVM_NewArray support
static Klass* array_klass_acquire(oop java_class);
- static void release_set_array_klass(oop java_class, Klass* klass);
+
// compiler support for class operations
static int klass_offset() { CHECK_INIT(_klass_offset); }
static int array_klass_offset() { CHECK_INIT(_array_klass_offset); }
@@ -331,7 +335,6 @@ class java_lang_Class : AllStatic {
static objArrayOop signers(oop java_class);
static oop class_data(oop java_class);
static void set_class_data(oop java_class, oop classData);
- static void set_reflection_data(oop java_class, oop reflection_data);
static int reflection_data_offset() { return _reflectionData_offset; }
static oop class_loader(oop java_class);
@@ -344,10 +347,6 @@ class java_lang_Class : AllStatic {
static void set_source_file(oop java_class, oop source_file);
static int modifiers(oop java_class);
- static void set_modifiers(oop java_class, u2 value);
-
- static int raw_access_flags(oop java_class);
- static void set_raw_access_flags(oop java_class, u2 value);
static size_t oop_size(oop java_class);
static void set_oop_size(HeapWord* java_class, size_t size);
diff --git a/src/hotspot/share/code/relocInfo.cpp b/src/hotspot/share/code/relocInfo.cpp
index 286d407c94b..2a6335e2118 100644
--- a/src/hotspot/share/code/relocInfo.cpp
+++ b/src/hotspot/share/code/relocInfo.cpp
@@ -26,6 +26,7 @@
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -37,8 +38,6 @@
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"
-#include <new>
-
const RelocationHolder RelocationHolder::none; // its type is relocInfo::none
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index a6a08815d10..6f1778ef479 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_CODE_RELOCINFO_HPP
#define SHARE_CODE_RELOCINFO_HPP
+#include "cppstdlib/new.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/osInfo.hpp"
@@ -32,8 +33,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
class CodeBlob;
class Metadata;
class NativeMovConstReg;
diff --git a/src/hotspot/share/cppstdlib/new.hpp b/src/hotspot/share/cppstdlib/new.hpp
new file mode 100644
index 00000000000..3536ac13288
--- /dev/null
+++ b/src/hotspot/share/cppstdlib/new.hpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_CPPSTDLIB_NEW_HPP
+#define SHARE_CPPSTDLIB_NEW_HPP
+
+#include "utilities/compilerWarnings.hpp"
+
+// HotSpot usage:
+// Only the following may be used:
+// * std::nothrow_t, std::nothrow
+// * std::align_val_t
+// * The non-allocating forms of `operator new` and `operator new[]` are
+// implicitly used by the corresponding `new` and `new[]` expressions.
+// - operator new(size_t, void*) noexcept
+// - operator new[](size_t, void*) noexcept
+// Note that the non-allocating forms of `operator delete` and `operator
+// delete[]` are not used, since they are only invoked by a placement new
+// expression that fails by throwing an exception. But they might still
+// end up being referenced in such a situation.
+
+BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
+#include "utilities/vmassert_uninstall.hpp"
+
+#include <new>
+
+#include "utilities/vmassert_reinstall.hpp" // don't reorder
+END_ALLOW_FORBIDDEN_FUNCTIONS
+
+// Deprecation declarations to forbid use of the default global allocator.
+// See C++17 21.6.1 Header synopsis.
+
+namespace std {
+
+#if 0
+// We could deprecate exception types, for completeness, but don't bother. We
+// already have exceptions disabled, and run into compiler bugs when we try.
+//
+// gcc -Wattributes => type attributes ignored after type is already defined
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=122167
+//
+// clang -Wignored-attributes => attribute declaration must precede definition
+// The clang warning is https://github.com/llvm/llvm-project/issues/135481,
+// which should be fixed in clang 21.
+class [[deprecated]] bad_alloc;
+class [[deprecated]] bad_array_new_length;
+#endif // #if 0
+
+// Forbid new_handler manipulation by HotSpot code, leaving it untouched for
+// use by application code.
+[[deprecated]] new_handler get_new_handler() noexcept;
+[[deprecated]] new_handler set_new_handler(new_handler) noexcept;
+
+// Prefer HotSpot mechanisms for padding.
+//
+// The syntax for redeclaring these for deprecation is tricky, and not
+// supported by some versions of some compilers. Dispatch on compiler and
+// version to decide whether to redeclare deprecated.
+
+#if defined(__clang__)
+#if __clang_major__ >= 19
+// clang18 and earlier may accept the declaration but go wrong with uses.
+// Different warnings and link-time failures are both possible.
+#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
+#endif // restrict clang version
+
+#elif defined(__GNUC__)
+#if (__GNUC__ > 13) || (__GNUC__ == 13 && __GNUC_MINOR__ >= 2)
+// g++11.5 accepts the declaration and reports deprecation for uses, but also
+// has link-time failure for uses. Haven't tested intermediate versions.
+#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
+#endif // restrict gcc version
+
+#elif defined(_MSC_VER)
+// VS2022-17.13.2 => error C2370: '...': redefinition; different storage class
+
+#endif // Compiler dispatch
+
+// Redeclare deprecated if such is supported.
+#ifdef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+[[deprecated]] extern const size_t hardware_destructive_interference_size;
+[[deprecated]] extern const size_t hardware_constructive_interference_size;
+#undef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+#endif // CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
+
+} // namespace std
+
+// Forbid using the global allocator by HotSpot code.
+// This doesn't provide complete coverage. Some global allocation and
+// deallocation functions are implicitly declared in all translation units,
+// without needing to include <new>; see C++17 6.7.4. So this doesn't remove
+// the need for the link-time verification that these functions aren't used.
+//
+// But don't poison them when compiling gtests. The gtest framework, the
+// HotSpot wrapper around it (gtestMain.cpp), and even some tests, all have
+// new/new[] and delete/delete[] expressions that use the default global
+// allocator. We also don't apply the link-time check for gtests, for the
+// same reason.
+#ifndef HOTSPOT_GTEST
+
+[[deprecated]] void* operator new(std::size_t);
+[[deprecated]] void* operator new(std::size_t, std::align_val_t);
+[[deprecated]] void* operator new(std::size_t, const std::nothrow_t&) noexcept;
+[[deprecated]] void* operator new(std::size_t, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void operator delete(void*) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t) noexcept;
+[[deprecated]] void operator delete(void*, std::align_val_t) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t, std::align_val_t) noexcept;
+[[deprecated]] void operator delete(void*, const std::nothrow_t&) noexcept;
+[[deprecated]] void operator delete(void*, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void* operator new[](std::size_t);
+[[deprecated]] void* operator new[](std::size_t, std::align_val_t);
+[[deprecated]] void* operator new[](std::size_t, const std::nothrow_t&) noexcept;
+[[deprecated]] void* operator new[](std::size_t, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+[[deprecated]] void operator delete[](void*) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
+[[deprecated]] void operator delete[](void*, std::align_val_t) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t, std::align_val_t) noexcept;
+[[deprecated]] void operator delete[](void*, const std::nothrow_t&) noexcept;
+[[deprecated]] void operator delete[](void*, std::align_val_t,
+ const std::nothrow_t&) noexcept;
+
+#endif // HOTSPOT_GTEST
+
+// Allow (don't poison) the non-allocating forms from [new.delete.placement].
+
+#endif // SHARE_CPPSTDLIB_NEW_HPP
diff --git a/src/hotspot/share/gc/g1/g1Allocator.cpp b/src/hotspot/share/gc/g1/g1Allocator.cpp
index 713bafd4782..78710084ee3 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -123,6 +123,14 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
}
}
+size_t G1Allocator::free_bytes_in_retained_old_region() const {
+ if (_retained_old_gc_alloc_region == nullptr) {
+ return 0;
+ } else {
+ return _retained_old_gc_alloc_region->free();
+ }
+}
+
void G1Allocator::init_gc_alloc_regions(G1EvacInfo* evacuation_info) {
assert_at_safepoint_on_vm_thread();
diff --git a/src/hotspot/share/gc/g1/g1Allocator.hpp b/src/hotspot/share/gc/g1/g1Allocator.hpp
index 19b19c06e92..9a7e62f5cc6 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp
@@ -103,7 +103,10 @@ public:
void init_gc_alloc_regions(G1EvacInfo* evacuation_info);
void release_gc_alloc_regions(G1EvacInfo* evacuation_info);
void abandon_gc_alloc_regions();
+
bool is_retained_old_region(G1HeapRegion* hr);
+ // Return the amount of free bytes in the current retained old region.
+ size_t free_bytes_in_retained_old_region() const;
// Node index of current thread.
inline uint current_node_index() const;
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index f04658a1415..d18f61ff507 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -2964,8 +2964,8 @@ void G1CollectedHeap::abandon_collection_set() {
}
size_t G1CollectedHeap::non_young_occupancy_after_allocation(size_t allocation_word_size) {
- // For simplicity, just count whole regions.
- const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes;
+ const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes -
+ _allocator->free_bytes_in_retained_old_region();
// Humongous allocations will always be assigned to non-young heap, so consider
// that allocation in the result as well. Otherwise the allocation will always
// be in young gen, so there is no need to account it here.
diff --git a/src/hotspot/share/gc/g1/g1HeapRegion.cpp b/src/hotspot/share/gc/g1/g1HeapRegion.cpp
index 63d64503316..b1eeb333d8d 100644
--- a/src/hotspot/share/gc/g1/g1HeapRegion.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapRegion.cpp
@@ -787,23 +787,13 @@ void G1HeapRegion::fill_range_with_dead_objects(HeapWord* start, HeapWord* end)
// possible that there is a pinned object that is not any more referenced by
// Java code (only by native).
//
- // In this case we must not zap contents of such an array but we can overwrite
- // the header; since only pinned typearrays are allowed, this fits nicely with
- // putting filler arrays into the dead range as the object header sizes match and
- // no user data is overwritten.
+ // In this case we should not zap, because that would overwrite
+ // user-observable data. Memory corresponding to obj-header is safe to
+ // change, since it's not directly user-observable.
//
// In particular String Deduplication might change the reference to the character
// array of the j.l.String after native code obtained a raw reference to it (via
// GetStringCritical()).
- CollectedHeap::fill_with_objects(start, range_size, !has_pinned_objects());
- HeapWord* current = start;
- do {
- // Update the BOT if the a threshold is crossed.
- size_t obj_size = cast_to_oop(current)->size();
- update_bot_for_block(current, current + obj_size);
-
- // Advance to the next object.
- current += obj_size;
- guarantee(current <= end, "Should never go past end");
- } while (current != end);
+ CollectedHeap::fill_with_object(start, range_size, !has_pinned_objects());
+ update_bot_for_block(start, start + range_size);
}
diff --git a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
index a8209eb19ba..ffa573c68cc 100644
--- a/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungGenSizer.cpp
@@ -25,47 +25,95 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
+#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"
G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
_use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) {
+ precond(!FLAG_IS_ERGO(NewRatio));
+ precond(!FLAG_IS_ERGO(NewSize));
+ precond(!FLAG_IS_ERGO(MaxNewSize));
+
+ // Figure out compatible young gen sizing policies.
+ // This will either use all default, NewRatio or a combination of NewSize and
+ // MaxNewSize. If both ratio and size is user specified NewRatio will be ignored.
+
+ const bool user_specified_NewRatio = !FLAG_IS_DEFAULT(NewRatio);
+ const bool user_specified_NewSize = !FLAG_IS_DEFAULT(NewSize);
+ const bool user_specified_MaxNewSize = !FLAG_IS_DEFAULT(MaxNewSize);
+
+ // MaxNewSize is updated every time the heap is resized (and when initialized),
+ // as such the value of MaxNewSize is only modified if it is also used by the
+ // young generation sizing. (If MaxNewSize is user specified).
+
+ if (!user_specified_NewRatio && !user_specified_NewSize && !user_specified_MaxNewSize) {
+ // Using Defaults.
+ return;
+ }
+
+ if (user_specified_NewRatio && !user_specified_NewSize && !user_specified_MaxNewSize) {
+ // Using NewRatio.
+ _sizer_kind = SizerNewRatio;
+ _use_adaptive_sizing = false;
+ return;
+ }
+
if (FLAG_IS_CMDLINE(NewRatio)) {
- if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
- log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
- } else {
- _sizer_kind = SizerNewRatio;
- _use_adaptive_sizing = false;
- return;
+ // NewRatio ignored at this point, issue warning if NewRatio was specified
+ // on the command line.
+ log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
+ }
+
+ assert(!FLAG_IS_DEFAULT(InitialHeapSize), "Initial heap size must be selected");
+ if (user_specified_NewSize && NewSize > InitialHeapSize) {
+ // If user specified NewSize is larger than the InitialHeapSize truncate the value.
+ if (FLAG_IS_CMDLINE(NewSize)) {
+ log_warning(gc, ergo)("NewSize (%zuk) is greater than the initial heap size (%zuk). "
+ "A new NewSize of %zuk will be used.",
+ NewSize/K, InitialHeapSize/K, InitialHeapSize/K);
}
+ FLAG_SET_ERGO(NewSize, InitialHeapSize);
+ }
+
+ assert(!FLAG_IS_DEFAULT(MaxHeapSize), "Max heap size must be selected");
+ if (user_specified_MaxNewSize && MaxNewSize > MaxHeapSize) {
+ // If user specified MaxNewSize is larger than the MaxHeapSize truncate the value.
+ if (FLAG_IS_CMDLINE(MaxNewSize)) {
+ log_warning(gc, ergo)("MaxNewSize (%zuk) greater than the entire heap (%zuk). "
+ "A new MaxNewSize of %zuk will be used.",
+ MaxNewSize/K, MaxHeapSize/K, MaxHeapSize/K);
+ }
+ FLAG_SET_ERGO(MaxNewSize, MaxHeapSize);
}
if (NewSize > MaxNewSize) {
+ // Either NewSize, MaxNewSize or both have been specified and are incompatible.
+ // In either case set MaxNewSize to the value of NewSize.
if (FLAG_IS_CMDLINE(MaxNewSize)) {
- log_warning(gc, ergo)("NewSize (%zuk) is greater than the MaxNewSize (%zuk). "
- "A new max generation size of %zuk will be used.",
+ log_warning(gc, ergo)("NewSize (%zuk) is greater than MaxNewSize (%zuk). "
+ "A new MaxNewSize of %zuk will be used.",
NewSize/K, MaxNewSize/K, NewSize/K);
}
FLAG_SET_ERGO(MaxNewSize, NewSize);
}
- if (FLAG_IS_CMDLINE(NewSize)) {
- _min_desired_young_length = MAX2((uint) (NewSize / G1HeapRegion::GrainBytes),
- 1U);
- if (FLAG_IS_CMDLINE(MaxNewSize)) {
- _max_desired_young_length =
- MAX2((uint) (MaxNewSize / G1HeapRegion::GrainBytes),
- 1U);
- _sizer_kind = SizerMaxAndNewSize;
- _use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
- } else {
- _sizer_kind = SizerNewSizeOnly;
- }
- } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
- _max_desired_young_length =
- MAX2((uint) (MaxNewSize / G1HeapRegion::GrainBytes),
- 1U);
+ if (user_specified_NewSize) {
+ _min_desired_young_length = MAX2((uint)(NewSize / G1HeapRegion::GrainBytes), 1U);
+ }
+
+ if (user_specified_MaxNewSize) {
+ _max_desired_young_length = MAX2((uint)(MaxNewSize / G1HeapRegion::GrainBytes), 1U);
+ }
+
+ if (user_specified_NewSize && user_specified_MaxNewSize) {
+ _sizer_kind = SizerMaxAndNewSize;
+ _use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
+ } else if (user_specified_NewSize) {
+ _sizer_kind = SizerNewSizeOnly;
+ } else {
+ postcond(user_specified_MaxNewSize);
_sizer_kind = SizerMaxNewSizeOnly;
}
}
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index 6d30c5a8d1f..fc42fc1eab2 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -180,19 +180,6 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
return AtomicAccess::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}
-// Only used by oldgen allocation.
-bool MutableSpace::needs_expand(size_t word_size) const {
- // This method can be invoked either outside of safepoint by java threads or
- // in safepoint by gc workers. Such accesses are synchronized by holding one
- // of the following locks.
- assert(Heap_lock->is_locked() || PSOldGenExpand_lock->is_locked(), "precondition");
-
- // Holding the lock means end is stable. So while top may be advancing
- // via concurrent allocations, there is no need to order the reads of top
- // and end here, unlike in cas_allocate.
- return pointer_delta(end(), top()) < word_size;
-}
-
void MutableSpace::oop_iterate(OopIterateClosure* cl) {
HeapWord* obj_addr = bottom();
HeapWord* t = top();
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp
index 37fa7e3710e..9d3894e2489 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -127,11 +127,6 @@ public:
virtual HeapWord* cas_allocate(size_t word_size);
// Optional deallocation. Used in NUMA-allocator.
bool cas_deallocate(HeapWord *obj, size_t size);
- // Return true if this space needs to be expanded in order to satisfy an
- // allocation request of the indicated size. Concurrent allocations and
- // resizes may change the result of a later call. Used by oldgen allocator.
- // precondition: holding PSOldGenExpand_lock if not VM thread
- bool needs_expand(size_t word_size) const;
// Iteration.
void oop_iterate(OopIterateClosure* cl);
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index f5ed40c40e5..747e2f3228c 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -370,6 +370,55 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
}
+bool ParallelScavengeHeap::should_attempt_young_gc() const {
+ const bool ShouldRunYoungGC = true;
+ const bool ShouldRunFullGC = false;
+
+ if (!_young_gen->to_space()->is_empty()) {
+ log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
+ return ShouldRunFullGC;
+ }
+
+ // Check if the predicted promoted bytes will overflow free space in old-gen.
+ PSAdaptiveSizePolicy* policy = _size_policy;
+
+ size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
+ size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
+ // Total free size after possible old gen expansion
+ size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();
+
+ log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
+ (size_t) policy->average_promoted_in_bytes(),
+ (size_t) policy->padded_average_promoted_in_bytes());
+
+ if (promotion_estimate >= free_in_old_gen_with_expansion) {
+ log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
+ promotion_estimate, free_in_old_gen_with_expansion);
+ return ShouldRunFullGC;
+ }
+
+ if (UseAdaptiveSizePolicy) {
+ // Also checking OS has enough free memory to commit and expand old-gen.
+ // Otherwise, the recorded gc-pause-time might be inflated to include time
+ // of OS preparing free memory, resulting in inaccurate young-gen resizing.
+ assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
+ // Use uint64_t instead of size_t for 32bit compatibility.
+ uint64_t free_mem_in_os;
+ if (os::free_memory(free_mem_in_os)) {
+ size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
+ (uint64_t)SIZE_MAX);
+ if (promotion_estimate > actual_free) {
+ log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
+ promotion_estimate, actual_free);
+ return ShouldRunFullGC;
+ }
+ }
+ }
+
+ // No particular reasons to run full-gc, so young-gc.
+ return ShouldRunYoungGC;
+}
+
static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
}
@@ -403,7 +452,16 @@ bool ParallelScavengeHeap::check_gc_overhead_limit() {
}
HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
+#ifdef ASSERT
assert(Heap_lock->is_locked(), "precondition");
+ if (is_init_completed()) {
+ assert(SafepointSynchronize::is_at_safepoint(), "precondition");
+ assert(Thread::current()->is_VM_thread(), "precondition");
+ } else {
+ assert(Thread::current()->is_Java_thread(), "precondition");
+ assert(Heap_lock->owned_by_self(), "precondition");
+ }
+#endif
HeapWord* result = young_gen()->expand_and_allocate(size);
@@ -507,17 +565,18 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
VMThread::execute(&op);
}
-void ParallelScavengeHeap::collect_at_safepoint(bool full) {
+void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
assert(!GCLocker::is_active(), "precondition");
bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);
- if (!full) {
- bool success = PSScavenge::invoke(clear_soft_refs);
- if (success) {
+ if (!is_full && should_attempt_young_gc()) {
+ bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
+ if (young_gc_success) {
return;
}
- // Upgrade to Full-GC if young-gc fails
+ log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
}
+
const bool should_do_max_compaction = false;
PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
}
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index f9161afc28f..0221fd2a90e 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -119,6 +119,9 @@ class ParallelScavengeHeap : public CollectedHeap {
void print_tracing_info() const override;
void stop() override {};
+ // Returns true if a young GC should be attempted, false if a full GC is preferred.
+ bool should_attempt_young_gc() const;
+
public:
ParallelScavengeHeap() :
CollectedHeap(),
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 4e614c53447..974cd6aca59 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -33,6 +33,7 @@
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"
@@ -118,13 +119,22 @@ void PSOldGen::initialize_performance_counters() {
}
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
+#ifdef ASSERT
assert(Heap_lock->is_locked(), "precondition");
+ if (is_init_completed()) {
+ assert(SafepointSynchronize::is_at_safepoint(), "precondition");
+ assert(Thread::current()->is_VM_thread(), "precondition");
+ } else {
+ assert(Thread::current()->is_Java_thread(), "precondition");
+ assert(Heap_lock->owned_by_self(), "precondition");
+ }
+#endif
- if (object_space()->needs_expand(word_size)) {
+ if (pointer_delta(object_space()->end(), object_space()->top()) < word_size) {
expand(word_size*HeapWordSize);
}
- // Reuse the CAS API even though this is VM thread in safepoint. This method
+ // Reuse the CAS API even though this is in a critical section. This method
// is not invoked repeatedly, so the CAS overhead should be negligible.
return cas_allocate_noexpand(word_size);
}
@@ -168,7 +178,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
// true until we expand, since we have the lock. Other threads may take
// the space we need before we can allocate it, regardless of whether we
// expand. That's okay, we'll just try expanding again.
- if (object_space()->needs_expand(word_size)) {
+ if (pointer_delta(object_space()->end(), object_space()->top()) < word_size) {
result = expand(word_size*HeapWordSize);
}
}
@@ -192,10 +202,21 @@ void PSOldGen::try_expand_till_size(size_t target_capacity_bytes) {
bool PSOldGen::expand(size_t bytes) {
#ifdef ASSERT
- if (!Thread::current()->is_VM_thread()) {
- assert_lock_strong(PSOldGenExpand_lock);
+ // During startup (is_init_completed() == false), expansion can occur for
+ // 1. java-threads invoking heap-allocation (using Heap_lock)
+ // 2. CDS construction by a single thread (using PSOldGenExpand_lock but not needed)
+ //
+ // After startup (is_init_completed() == true), expansion can occur for
+ // 1. GC workers for promoting to old-gen (using PSOldGenExpand_lock)
+ // 2. VM thread to satisfy the pending allocation
+ // Both cases are inside safepoint pause, but are never overlapping.
+ //
+ if (is_init_completed()) {
+ assert(SafepointSynchronize::is_at_safepoint(), "precondition");
+ assert(Thread::current()->is_VM_thread() || PSOldGenExpand_lock->owned_by_self(), "precondition");
+ } else {
+ assert(Heap_lock->owned_by_self() || PSOldGenExpand_lock->owned_by_self(), "precondition");
}
- assert_locked_or_safepoint(Heap_lock);
assert(bytes > 0, "precondition");
#endif
const size_t remaining_bytes = virtual_space()->uncommitted_size();
diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp
index e738a13d464..d1d595df529 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -313,12 +313,6 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
- // Check for potential problems.
- if (!should_attempt_scavenge()) {
- log_info(gc, ergo)("Young-gc might fail so skipping");
- return false;
- }
-
IsSTWGCActiveMark mark;
_gc_timer.register_gc_start();
@@ -336,8 +330,7 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
PSOldGen* old_gen = heap->old_gen();
PSAdaptiveSizePolicy* size_policy = heap->size_policy();
- assert(young_gen->to_space()->is_empty(),
- "Attempt to scavenge with live objects in to_space");
+ assert(young_gen->to_space()->is_empty(), "precondition");
heap->increment_total_collections();
@@ -520,59 +513,6 @@ void PSScavenge::clean_up_failed_promotion() {
NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}
-bool PSScavenge::should_attempt_scavenge() {
- const bool ShouldRunYoungGC = true;
- const bool ShouldRunFullGC = false;
-
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
- PSYoungGen* young_gen = heap->young_gen();
- PSOldGen* old_gen = heap->old_gen();
-
- if (!young_gen->to_space()->is_empty()) {
- log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
- return ShouldRunFullGC;
- }
-
- // Check if the predicted promoted bytes will overflow free space in old-gen.
- PSAdaptiveSizePolicy* policy = heap->size_policy();
-
- size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
- size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
- // Total free size after possible old gen expansion
- size_t free_in_old_gen_with_expansion = old_gen->max_gen_size() - old_gen->used_in_bytes();
-
- log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
- (size_t) policy->average_promoted_in_bytes(),
- (size_t) policy->padded_average_promoted_in_bytes());
-
- if (promotion_estimate >= free_in_old_gen_with_expansion) {
- log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
- promotion_estimate, free_in_old_gen_with_expansion);
- return ShouldRunFullGC;
- }
-
- if (UseAdaptiveSizePolicy) {
- // Also checking OS has enough free memory to commit and expand old-gen.
- // Otherwise, the recorded gc-pause-time might be inflated to include time
- // of OS preparing free memory, resulting in inaccurate young-gen resizing.
- assert(old_gen->committed().byte_size() >= old_gen->used_in_bytes(), "inv");
- // Use uint64_t instead of size_t for 32bit compatibility.
- uint64_t free_mem_in_os;
- if (os::free_memory(free_mem_in_os)) {
- size_t actual_free = (size_t)MIN2(old_gen->committed().byte_size() - old_gen->used_in_bytes() + free_mem_in_os,
- (uint64_t)SIZE_MAX);
- if (promotion_estimate > actual_free) {
- log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
- promotion_estimate, actual_free);
- return ShouldRunFullGC;
- }
- }
- }
-
- // No particular reasons to run full-gc, so young-gc.
- return ShouldRunYoungGC;
-}
-
// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
_young_generation_boundary = v;
diff --git a/src/hotspot/share/gc/parallel/psScavenge.hpp b/src/hotspot/share/gc/parallel/psScavenge.hpp
index c297a46a46e..af9b91f74bc 100644
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp
@@ -64,8 +64,6 @@ class PSScavenge: AllStatic {
static void clean_up_failed_promotion();
- static bool should_attempt_scavenge();
-
// Private accessors
static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
diff --git a/src/hotspot/share/gc/shared/bufferNode.cpp b/src/hotspot/share/gc/shared/bufferNode.cpp
index b064f9c7efe..90e50f52e84 100644
--- a/src/hotspot/share/gc/shared/bufferNode.cpp
+++ b/src/hotspot/share/gc/shared/bufferNode.cpp
@@ -22,12 +22,11 @@
*
*/
+#include "cppstdlib/new.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"
-#include
-
BufferNode::AllocatorConfig::AllocatorConfig(size_t size)
: _buffer_capacity(size)
{
diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp
index d52efc13dac..a1cc3ffa553 100644
--- a/src/hotspot/share/gc/shared/oopStorage.cpp
+++ b/src/hotspot/share/gc/shared/oopStorage.cpp
@@ -28,7 +28,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -122,7 +122,7 @@ OopStorage::ActiveArray::ActiveArray(size_t size) :
{}
OopStorage::ActiveArray::~ActiveArray() {
- assert(_refcount == 0, "precondition");
+ assert(_refcount.load_relaxed() == 0, "precondition");
}
OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size,
@@ -144,32 +144,32 @@ size_t OopStorage::ActiveArray::size() const {
}
size_t OopStorage::ActiveArray::block_count() const {
- return _block_count;
+ return _block_count.load_relaxed();
}
size_t OopStorage::ActiveArray::block_count_acquire() const {
- return AtomicAccess::load_acquire(&_block_count);
+ return _block_count.load_acquire();
}
void OopStorage::ActiveArray::increment_refcount() const {
- int new_value = AtomicAccess::add(&_refcount, 1);
- assert(new_value >= 1, "negative refcount %d", new_value - 1);
+ int old_value = _refcount.fetch_then_add(1);
+ assert(old_value >= 0, "negative refcount %d", old_value);
}
bool OopStorage::ActiveArray::decrement_refcount() const {
- int new_value = AtomicAccess::sub(&_refcount, 1);
+ int new_value = _refcount.sub_then_fetch(1);
assert(new_value >= 0, "negative refcount %d", new_value);
return new_value == 0;
}
bool OopStorage::ActiveArray::push(Block* block) {
- size_t index = _block_count;
+ size_t index = _block_count.load_relaxed();
if (index < _size) {
block->set_active_index(index);
*block_ptr(index) = block;
// Use a release_store to ensure all the setup is complete before
// making the block visible.
- AtomicAccess::release_store(&_block_count, index + 1);
+ _block_count.release_store(index + 1);
return true;
} else {
return false;
@@ -177,19 +177,19 @@ bool OopStorage::ActiveArray::push(Block* block) {
}
void OopStorage::ActiveArray::remove(Block* block) {
- assert(_block_count > 0, "array is empty");
+ assert(_block_count.load_relaxed() > 0, "array is empty");
size_t index = block->active_index();
assert(*block_ptr(index) == block, "block not present");
- size_t last_index = _block_count - 1;
+ size_t last_index = _block_count.load_relaxed() - 1;
Block* last_block = *block_ptr(last_index);
last_block->set_active_index(index);
*block_ptr(index) = last_block;
- _block_count = last_index;
+ _block_count.store_relaxed(last_index);
}
void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
- assert(_block_count == 0, "array must be empty");
- size_t count = from->_block_count;
+ assert(_block_count.load_relaxed() == 0, "array must be empty");
+ size_t count = from->_block_count.load_relaxed();
assert(count <= _size, "precondition");
Block* const* from_ptr = from->block_ptr(0);
Block** to_ptr = block_ptr(0);
@@ -198,7 +198,7 @@ void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
assert(block->active_index() == i, "invariant");
*to_ptr++ = block;
}
- _block_count = count;
+ _block_count.store_relaxed(count);
}
// Blocks start with an array of BitsPerWord oop entries. That array
@@ -230,14 +230,17 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
assert(is_aligned(this, block_alignment), "misaligned block");
}
+#ifdef ASSERT
OopStorage::Block::~Block() {
- assert(_release_refcount == 0, "deleting block while releasing");
- assert(_deferred_updates_next == nullptr, "deleting block with deferred update");
+ assert(_release_refcount.load_relaxed() == 0, "deleting block while releasing");
+ assert(_deferred_updates_next.load_relaxed() == nullptr, "deleting block with deferred update");
// Clear fields used by block_for_ptr and entry validation, which
- // might help catch bugs. Volatile to prevent dead-store elimination.
- const_cast(_allocated_bitmask) = 0;
+ // might help catch bugs.
+ _allocated_bitmask.store_relaxed(0);
+ // Volatile to prevent dead-store elimination.
const_cast(_owner_address) = 0;
}
+#endif // ASSERT
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
@@ -272,16 +275,16 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
bool OopStorage::Block::is_safe_to_delete() const {
assert(is_empty(), "precondition");
OrderAccess::loadload();
- return (AtomicAccess::load_acquire(&_release_refcount) == 0) &&
- (AtomicAccess::load_acquire(&_deferred_updates_next) == nullptr);
+ return ((_release_refcount.load_acquire() == 0) &&
+ (_deferred_updates_next.load_acquire() == nullptr));
}
OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
- return _deferred_updates_next;
+ return _deferred_updates_next.load_relaxed();
}
void OopStorage::Block::set_deferred_updates_next(Block* block) {
- _deferred_updates_next = block;
+ _deferred_updates_next.store_relaxed(block);
}
bool OopStorage::Block::contains(const oop* ptr) const {
@@ -321,9 +324,8 @@ void OopStorage::Block::atomic_add_allocated(uintx add) {
// we can use an atomic add to implement the operation. The assert post
// facto verifies the precondition held; if there were any set bits in
// common, then after the add at least one of them will be zero.
- uintx sum = AtomicAccess::add(&_allocated_bitmask, add);
- assert((sum & add) == add, "some already present: %zu:%zu",
- sum, add);
+ uintx sum = _allocated_bitmask.add_then_fetch(add);
+ assert((sum & add) == add, "some already present: %zu:%zu", sum, add);
}
oop* OopStorage::Block::allocate() {
@@ -452,7 +454,7 @@ oop* OopStorage::allocate() {
oop* result = block->allocate();
assert(result != nullptr, "allocation failed");
assert(!block->is_empty(), "postcondition");
- AtomicAccess::inc(&_allocation_count); // release updates outside lock.
+ _allocation_count.add_then_fetch(1u); // release updates outside lock.
if (block->is_full()) {
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
@@ -490,7 +492,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(!is_empty_bitmask(taken), "invariant");
} // Drop lock, now that we've taken all available entries from block.
size_t num_taken = population_count(taken);
- AtomicAccess::add(&_allocation_count, num_taken);
+ _allocation_count.add_then_fetch(num_taken);
// Fill ptrs from those taken entries.
size_t limit = MIN2(num_taken, size);
for (size_t i = 0; i < limit; ++i) {
@@ -506,7 +508,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
assert(size == limit, "invariant");
assert(num_taken == (limit + population_count(taken)), "invariant");
block->release_entries(taken, this);
- AtomicAccess::sub(&_allocation_count, num_taken - limit);
+ _allocation_count.sub_then_fetch(num_taken - limit);
}
log_trace(oopstorage, ref)("%s: bulk allocate %zu, returned %zu",
name(), limit, num_taken - limit);
@@ -527,9 +529,9 @@ bool OopStorage::try_add_block() {
if (block == nullptr) return false;
// Add new block to the _active_array, growing if needed.
- if (!_active_array->push(block)) {
+ if (!_active_array.load_relaxed()->push(block)) {
if (expand_active_array()) {
- guarantee(_active_array->push(block), "push failed after expansion");
+ guarantee(_active_array.load_relaxed()->push(block), "push failed after expansion");
} else {
log_debug(oopstorage, blocks)("%s: failed active array expand", name());
Block::delete_block(*block);
@@ -576,7 +578,7 @@ OopStorage::Block* OopStorage::block_for_allocation() {
// indicate allocation failure.
bool OopStorage::expand_active_array() {
assert_lock_strong(_allocation_mutex);
- ActiveArray* old_array = _active_array;
+ ActiveArray* old_array = _active_array.load_relaxed();
size_t new_size = 2 * old_array->size();
log_debug(oopstorage, blocks)("%s: expand active array %zu",
name(), new_size);
@@ -599,7 +601,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// Update new_array refcount to account for the new reference.
new_array->increment_refcount();
// Install new_array, ensuring its initialization is complete first.
- AtomicAccess::release_store(&_active_array, new_array);
+ _active_array.release_store(new_array);
// Wait for any readers that could read the old array from _active_array.
// Can't use GlobalCounter here, because this is called from allocate(),
// which may be called in the scope of a GlobalCounter critical section
@@ -617,7 +619,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
- ActiveArray* result = AtomicAccess::load_acquire(&_active_array);
+ ActiveArray* result = _active_array.load_acquire();
result->increment_refcount();
return result;
}
@@ -625,7 +627,7 @@ OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
// Decrement refcount of array and destroy if refcount is zero.
void OopStorage::relinquish_block_array(ActiveArray* array) const {
if (array->decrement_refcount()) {
- assert(array != _active_array, "invariant");
+ assert(array != _active_array.load_relaxed(), "invariant");
ActiveArray::destroy(array);
}
}
@@ -672,14 +674,14 @@ static void log_release_transitions(uintx releasing,
void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
assert(releasing != 0, "preconditon");
// Prevent empty block deletion when transitioning to empty.
- AtomicAccess::inc(&_release_refcount);
+ _release_refcount.add_then_fetch(1u);
// Atomically update allocated bitmask.
- uintx old_allocated = _allocated_bitmask;
+ uintx old_allocated = _allocated_bitmask.load_relaxed();
while (true) {
assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
uintx new_value = old_allocated ^ releasing;
- uintx fetched = AtomicAccess::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
+ uintx fetched = _allocated_bitmask.compare_exchange(old_allocated, new_value);
if (fetched == old_allocated) break; // Successful update.
old_allocated = fetched; // Retry with updated bitmask.
}
@@ -698,12 +700,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
- if (AtomicAccess::replace_if_null(&_deferred_updates_next, this)) {
+ if (_deferred_updates_next.compare_exchange(nullptr, this) == nullptr) {
// Successfully claimed. Push, with self-loop for end-of-list.
- Block* head = owner->_deferred_updates;
+ Block* head = owner->_deferred_updates.load_relaxed();
while (true) {
- _deferred_updates_next = (head == nullptr) ? this : head;
- Block* fetched = AtomicAccess::cmpxchg(&owner->_deferred_updates, head, this);
+ _deferred_updates_next.store_relaxed((head == nullptr) ? this : head);
+ Block* fetched = owner->_deferred_updates.compare_exchange(head, this);
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
@@ -720,7 +722,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
}
}
// Release hold on empty block deletion.
- AtomicAccess::dec(&_release_refcount);
+ _release_refcount.sub_then_fetch(1u);
}
// Process one available deferred update. Returns true if one was processed.
@@ -729,13 +731,13 @@ bool OopStorage::reduce_deferred_updates() {
// Atomically pop a block off the list, if any available.
// No ABA issue because this is only called by one thread at a time.
// The atomicity is wrto pushes by release().
- Block* block = AtomicAccess::load_acquire(&_deferred_updates);
+ Block* block = _deferred_updates.load_acquire();
while (true) {
if (block == nullptr) return false;
// Try atomic pop of block from list.
Block* tail = block->deferred_updates_next();
if (block == tail) tail = nullptr; // Handle self-loop end marker.
- Block* fetched = AtomicAccess::cmpxchg(&_deferred_updates, block, tail);
+ Block* fetched = _deferred_updates.compare_exchange(block, tail);
if (fetched == block) break; // Update successful.
block = fetched; // Retry with updated block.
}
@@ -780,7 +782,7 @@ void OopStorage::release(const oop* ptr) {
assert(block != nullptr, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
log_trace(oopstorage, ref)("%s: releasing " PTR_FORMAT, name(), p2i(ptr));
block->release_entries(block->bitmask_for_entry(ptr), this);
- AtomicAccess::dec(&_allocation_count);
+ _allocation_count.sub_then_fetch(1u);
}
void OopStorage::release(const oop* const* ptrs, size_t size) {
@@ -806,7 +808,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
}
// Release the contiguous entries that are in block.
block->release_entries(releasing, this);
- AtomicAccess::sub(&_allocation_count, count);
+ _allocation_count.sub_then_fetch(count);
}
}
@@ -837,7 +839,7 @@ OopStorage::OopStorage(const char* name, MemTag mem_tag) :
_mem_tag(mem_tag),
_needs_cleanup(false)
{
- _active_array->increment_refcount();
+ _active_array.load_relaxed()->increment_refcount();
assert(_active_mutex->rank() < _allocation_mutex->rank(),
"%s: active_mutex must have lower rank than allocation_mutex", _name);
assert(Service_lock->rank() < _active_mutex->rank(),
@@ -852,20 +854,21 @@ void OopStorage::delete_empty_block(const Block& block) {
OopStorage::~OopStorage() {
Block* block;
- while ((block = _deferred_updates) != nullptr) {
- _deferred_updates = block->deferred_updates_next();
+ while ((block = _deferred_updates.load_relaxed()) != nullptr) {
+ _deferred_updates.store_relaxed(block->deferred_updates_next());
block->set_deferred_updates_next(nullptr);
}
while ((block = _allocation_list.head()) != nullptr) {
_allocation_list.unlink(*block);
}
- bool unreferenced = _active_array->decrement_refcount();
+ ActiveArray* array = _active_array.load_relaxed();
+ bool unreferenced = array->decrement_refcount();
assert(unreferenced, "deleting storage while _active_array is referenced");
- for (size_t i = _active_array->block_count(); 0 < i; ) {
- block = _active_array->at(--i);
+ for (size_t i = array->block_count(); 0 < i; ) {
+ block = array->at(--i);
Block::delete_block(*block);
}
- ActiveArray::destroy(_active_array);
+ ActiveArray::destroy(array);
os::free(const_cast<char*>(_name));
}
@@ -894,7 +897,7 @@ bool OopStorage::should_report_num_dead() const {
// face of frequent explicit ServiceThread wakeups, hence the defer period.
// Global cleanup request state.
-static volatile bool needs_cleanup_requested = false;
+static Atomic<bool> needs_cleanup_requested{false};
// Time after which a cleanup is permitted.
static jlong cleanup_permit_time = 0;
@@ -906,12 +909,11 @@ const jlong cleanup_defer_period = 500 * NANOSECS_PER_MILLISEC;
bool OopStorage::has_cleanup_work_and_reset() {
assert_lock_strong(Service_lock);
- if (AtomicAccess::load_acquire(&needs_cleanup_requested) &&
- os::javaTimeNanos() > cleanup_permit_time) {
- cleanup_permit_time =
- os::javaTimeNanos() + cleanup_defer_period;
+ if (needs_cleanup_requested.load_acquire() &&
+ (os::javaTimeNanos() > cleanup_permit_time)) {
+ cleanup_permit_time = os::javaTimeNanos() + cleanup_defer_period;
// Set the request flag false and return its old value.
- AtomicAccess::release_store(&needs_cleanup_requested, false);
+ needs_cleanup_requested.release_store(false);
return true;
} else {
return false;
@@ -923,22 +925,22 @@ bool OopStorage::has_cleanup_work_and_reset() {
void OopStorage::record_needs_cleanup() {
// Set local flag first, else ServiceThread could wake up and miss
// the request.
- AtomicAccess::release_store(&_needs_cleanup, true);
- AtomicAccess::release_store_fence(&needs_cleanup_requested, true);
+ _needs_cleanup.release_store(true);
+ needs_cleanup_requested.release_store_fence(true);
}
bool OopStorage::delete_empty_blocks() {
// ServiceThread might have oopstorage work, but not for this object.
// But check for deferred updates, which might provide cleanup work.
- if (!AtomicAccess::load_acquire(&_needs_cleanup) &&
- (AtomicAccess::load_acquire(&_deferred_updates) == nullptr)) {
+ if (!_needs_cleanup.load_acquire() &&
+ (_deferred_updates.load_acquire() == nullptr)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
- AtomicAccess::release_store_fence(&_needs_cleanup, false);
+ _needs_cleanup.release_store_fence(false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
@@ -977,7 +979,7 @@ bool OopStorage::delete_empty_blocks() {
// but don't re-notify, to avoid useless spinning of the
// ServiceThread. Instead, iteration completion notifies.
if (_concurrent_iteration_count > 0) return true;
- _active_array->remove(block);
+ _active_array.load_relaxed()->remove(block);
}
// Remove block from _allocation_list and delete it.
_allocation_list.unlink(*block);
@@ -1001,8 +1003,9 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Block could be a false positive, so get index carefully.
size_t index = Block::active_index_safe(block);
- if ((index < _active_array->block_count()) &&
- (block == _active_array->at(index)) &&
+ ActiveArray* array = _active_array.load_relaxed();
+ if ((index < array->block_count()) &&
+ (block == array->at(index)) &&
block->contains(ptr)) {
if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
@@ -1015,7 +1018,7 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
}
size_t OopStorage::allocation_count() const {
- return _allocation_count;
+ return _allocation_count.load_relaxed();
}
size_t OopStorage::block_count() const {
@@ -1084,7 +1087,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {
bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
data->_processed += data->_segment_end - data->_segment_start;
- size_t start = AtomicAccess::load_acquire(&_next_block);
+ size_t start = _next_block.load_acquire();
if (start >= _block_count) {
return finish_iteration(data); // No more blocks available.
}
@@ -1097,11 +1100,11 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
size_t max_step = 10;
size_t remaining = _block_count - start;
size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
- // AtomicAccess::add with possible overshoot. This can perform better
+ // Atomic add with possible overshoot. This can perform better
// than a CAS loop on some platforms when there is contention.
// We can cope with the uncertainty by recomputing start/end from
// the result of the add, and dealing with potential overshoot.
- size_t end = AtomicAccess::add(&_next_block, step);
+ size_t end = _next_block.add_then_fetch(step);
// _next_block may have changed, so recompute start from result of add.
start = end - step;
// _next_block may have changed so much that end has overshot.
@@ -1128,15 +1131,15 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
}
size_t OopStorage::BasicParState::num_dead() const {
- return AtomicAccess::load(&_num_dead);
+ return _num_dead.load_relaxed();
}
void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
- AtomicAccess::add(&_num_dead, num_dead);
+ _num_dead.add_then_fetch(num_dead);
}
void OopStorage::BasicParState::report_num_dead() const {
- _storage->report_num_dead(AtomicAccess::load(&_num_dead));
+ _storage->report_num_dead(_num_dead.load_relaxed());
}
const char* OopStorage::name() const { return _name; }
@@ -1164,8 +1167,8 @@ bool OopStorage::Block::print_containing(const oop* addr, outputStream* st) {
#ifndef PRODUCT
void OopStorage::print_on(outputStream* st) const {
- size_t allocations = _allocation_count;
- size_t blocks = _active_array->block_count();
+ size_t allocations = _allocation_count.load_relaxed();
+ size_t blocks = _active_array.load_relaxed()->block_count();
double data_size = section_size * section_count;
double alloc_percentage = percent_of((double)allocations, blocks * data_size);
diff --git a/src/hotspot/share/gc/shared/oopStorage.hpp b/src/hotspot/share/gc/shared/oopStorage.hpp
index 34c980a0586..6097eeaa4f4 100644
--- a/src/hotspot/share/gc/shared/oopStorage.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp
@@ -27,6 +27,7 @@
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"
@@ -258,15 +259,15 @@ private:
private:
const char* _name;
- ActiveArray* _active_array;
+ Atomic<ActiveArray*> _active_array;
AllocationList _allocation_list;
- Block* volatile _deferred_updates;
+ Atomic<Block*> _deferred_updates;
Mutex* _allocation_mutex;
Mutex* _active_mutex;
NumDeadCallback _num_dead_callback;
- // Volatile for racy unlocked accesses.
- volatile size_t _allocation_count;
+ // Atomic for racy unlocked accesses.
+ Atomic<size_t> _allocation_count;
// Protection for _active_array.
mutable SingleWriterSynchronizer _protect_active;
@@ -278,7 +279,7 @@ private:
MemTag _mem_tag;
// Flag indicating this storage object is a candidate for empty block deletion.
- volatile bool _needs_cleanup;
+ Atomic<bool> _needs_cleanup;
// Clients construct via "create" factory function.
OopStorage(const char* name, MemTag mem_tag);
diff --git a/src/hotspot/share/gc/shared/oopStorage.inline.hpp b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
index 4fb1d8fcaf1..c2747781a6b 100644
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
@@ -30,6 +30,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
@@ -42,8 +43,8 @@ class OopStorage::ActiveArray {
friend class OopStorage::TestAccess;
size_t _size;
- volatile size_t _block_count;
- mutable volatile int _refcount;
+ Atomic<size_t> _block_count;
+ mutable Atomic<int> _refcount;
// Block* _blocks[1]; // Pseudo flexible array member.
ActiveArray(size_t size);
@@ -104,7 +105,7 @@ inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
}
inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
- assert(index < _block_count, "precondition");
+ assert(index < _block_count.load_relaxed(), "precondition");
return *block_ptr(index);
}
@@ -135,16 +136,16 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
oop _data[BitsPerWord];
static const unsigned _data_pos = 0; // Position of _data.
- volatile uintx _allocated_bitmask; // One bit per _data element.
+ Atomic<uintx> _allocated_bitmask; // One bit per _data element.
intptr_t _owner_address;
void* _memory; // Unaligned storage containing block.
size_t _active_index;
AllocationListEntry _allocation_list_entry;
- Block* volatile _deferred_updates_next;
- volatile uintx _release_refcount;
+ Atomic<Block*> _deferred_updates_next;
+ Atomic<uintx> _release_refcount;
Block(const OopStorage* owner, void* memory);
- ~Block();
+ ~Block() NOT_DEBUG(= default);
void check_index(unsigned index) const;
unsigned get_index(const oop* ptr) const;
@@ -322,7 +323,7 @@ inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
}
inline uintx OopStorage::Block::allocated_bitmask() const {
- return _allocated_bitmask;
+ return _allocated_bitmask.load_relaxed();
}
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
@@ -366,7 +367,7 @@ inline bool OopStorage::iterate_impl(F f, Storage* storage) {
// Propagate const/non-const iteration to the block layer, by using
// const or non-const blocks as corresponding to Storage.
using BlockPtr = std::conditional_t<std::is_const<Storage>::value, const Block*, Block*>;
- ActiveArray* blocks = storage->_active_array;
+ ActiveArray* blocks = storage->_active_array.load_relaxed();
size_t limit = blocks->block_count();
for (size_t i = 0; i < limit; ++i) {
BlockPtr block = blocks->at(i);
diff --git a/src/hotspot/share/gc/shared/oopStorageParState.hpp b/src/hotspot/share/gc/shared/oopStorageParState.hpp
index 046bf9de8c2..cad1a1f0cf6 100644
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp
+++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp
@@ -27,6 +27,7 @@
#include "cppstdlib/type_traits.hpp"
#include "gc/shared/oopStorage.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
//////////////////////////////////////////////////////////////////////////////
@@ -131,10 +132,10 @@ class OopStorage::BasicParState {
const OopStorage* _storage;
ActiveArray* _active_array;
size_t _block_count;
- volatile size_t _next_block;
+ Atomic<size_t> _next_block;
uint _estimated_thread_count;
bool _concurrent;
- volatile size_t _num_dead;
+ Atomic<size_t> _num_dead;
NONCOPYABLE(BasicParState);
diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp
index f913f3db4ba..39c1fe4fc78 100644
--- a/src/hotspot/share/gc/shared/partialArrayState.cpp
+++ b/src/hotspot/share/gc/shared/partialArrayState.cpp
@@ -22,6 +22,7 @@
*
*/
+#include "cppstdlib/new.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
@@ -33,8 +34,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include <new>
-
PartialArrayState::PartialArrayState(oop src, oop dst,
size_t index, size_t length,
size_t initial_refcount)
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp
index 8bf6f4e539a..b8a27f31d01 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp
@@ -198,10 +198,8 @@ void StringDedup::Processor::run(JavaThread* thread) {
void StringDedup::Processor::log_statistics() {
_total_stat.add(&_cur_stat);
Stat::log_summary(&_cur_stat, &_total_stat);
- if (log_is_enabled(Debug, stringdedup)) {
- _cur_stat.log_statistics(false);
- _total_stat.log_statistics(true);
- Table::log_statistics();
- }
+ _cur_stat.emit_statistics(false /* total */);
+ _total_stat.emit_statistics(true /* total */);
+ Table::log_statistics();
_cur_stat = Stat{};
}
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp
index 28e5e9adf20..245a0ab20e9 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp
@@ -23,6 +23,7 @@
*/
#include "gc/shared/stringdedup/stringDedupStat.hpp"
+#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -91,13 +92,6 @@ static double strdedup_elapsed_param_ms(Tickspan t) {
}
void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_stat) {
- double total_deduped_bytes_percent = 0.0;
-
- if (total_stat->_new_bytes > 0) {
- // Avoid division by zero
- total_deduped_bytes_percent = percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes);
- }
-
log_info(stringdedup)(
"Concurrent String Deduplication "
"%zu/" STRDEDUP_BYTES_FORMAT_NS " (new), "
@@ -106,7 +100,7 @@ void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_sta
STRDEDUP_ELAPSED_FORMAT_MS " of " STRDEDUP_ELAPSED_FORMAT_MS,
last_stat->_new, STRDEDUP_BYTES_PARAM(last_stat->_new_bytes),
last_stat->_deduped, STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes),
- total_deduped_bytes_percent,
+ percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes),
strdedup_elapsed_param_ms(last_stat->_process_elapsed),
strdedup_elapsed_param_ms(last_stat->_active_elapsed));
}
@@ -208,7 +202,7 @@ void StringDedup::Stat::log_times(const char* prefix) const {
}
}
-void StringDedup::Stat::log_statistics(bool total) const {
+void StringDedup::Stat::log_statistics() const {
double known_percent = percent_of(_known, _inspected);
double known_shared_percent = percent_of(_known_shared, _inspected);
double new_percent = percent_of(_new, _inspected);
@@ -216,7 +210,6 @@ void StringDedup::Stat::log_statistics(bool total) const {
double deduped_bytes_percent = percent_of(_deduped_bytes, _new_bytes);
double replaced_percent = percent_of(_replaced, _new);
double deleted_percent = percent_of(_deleted, _new);
- log_times(total ? "Total" : "Last");
log_debug(stringdedup)(" Inspected: %12zu", _inspected);
log_debug(stringdedup)(" Known: %12zu(%5.1f%%)", _known, known_percent);
log_debug(stringdedup)(" Shared: %12zu(%5.1f%%)", _known_shared, known_shared_percent);
@@ -229,3 +222,40 @@ void StringDedup::Stat::log_statistics(bool total) const {
log_debug(stringdedup)(" Skipped: %zu (dead), %zu (incomplete), %zu (shared)",
_skipped_dead, _skipped_incomplete, _skipped_shared);
}
+
+void StringDedup::Stat::emit_statistics(bool total) const {
+ if (log_is_enabled(Debug, stringdedup)) {
+ log_times(total ? "Total" : "Last");
+ log_statistics();
+ }
+
+ if (total) {
+ // Send only JFR events about the last stats
+ return;
+ }
+
+ EventStringDeduplication e;
+ if (e.should_commit()) {
+ e.set_starttime(_active_start);
+ Ticks active_end = _active_start;
+ active_end += _active_elapsed;
+ e.set_endtime(active_end);
+
+ e.set_inspected(_inspected);
+ e.set_known(_known);
+ e.set_shared(_known_shared);
+ e.set_newStrings(_new);
+ e.set_newSize(_new_bytes);
+ e.set_replaced(_replaced);
+ e.set_deleted(_deleted);
+ e.set_deduplicated(_deduped);
+ e.set_deduplicatedSize(_deduped_bytes);
+ e.set_skippedDead(_skipped_dead);
+ e.set_skippedIncomplete(_skipped_incomplete);
+ e.set_skippedShared(_skipped_shared);
+ e.set_processing(_process_elapsed);
+ e.set_tableResize(_resize_table_elapsed);
+ e.set_tableCleanup(_cleanup_table_elapsed);
+ e.commit();
+ }
+}
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp
index db753af3be5..fb864ab34ab 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp
@@ -71,6 +71,7 @@ private:
void report_phase_end(const char* phase, Tickspan* elapsed);
void log_times(const char* prefix) const;
+ void log_statistics() const;
public:
Stat();
@@ -148,7 +149,7 @@ public:
void report_active_end();
void add(const Stat* const stat);
- void log_statistics(bool total) const;
+ void emit_statistics(bool total) const;
static void log_summary(const Stat* last_stat, const Stat* total_stat);
};
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
index 6682993766d..a376f3b96de 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
@@ -730,6 +730,10 @@ void StringDedup::Table::verify() {
}
void StringDedup::Table::log_statistics() {
+ if (!log_is_enabled(Debug, stringdedup)) {
+ return;
+ }
+
size_t dead_count;
int dead_state;
{
diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
index 9635ed4d0cb..2181e089663 100644
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp
@@ -37,7 +37,6 @@
#include "utilities/copy.hpp"
size_t ThreadLocalAllocBuffer::_max_size = 0;
-int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
unsigned int ThreadLocalAllocBuffer::_target_refills = 0;
ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :
@@ -225,30 +224,6 @@ void ThreadLocalAllocBuffer::startup_initialization() {
// abort during VM initialization.
_target_refills = MAX2(_target_refills, 2U);
-#ifdef COMPILER2
- // If the C2 compiler is present, extra space is needed at the end of
- // TLABs, otherwise prefetching instructions generated by the C2
- // compiler will fault (due to accessing memory outside of heap).
- // The amount of space is the max of the number of lines to
- // prefetch for array and for instance allocations. (Extra space must be
- // reserved to accommodate both types of allocations.)
- //
- // Only SPARC-specific BIS instructions are known to fault. (Those
- // instructions are generated if AllocatePrefetchStyle==3 and
- // AllocatePrefetchInstr==1). To be on the safe side, however,
- // extra space is reserved for all combinations of
- // AllocatePrefetchStyle and AllocatePrefetchInstr.
- //
- // If the C2 compiler is not present, no space is reserved.
-
- // +1 for rounding up to next cache line, +1 to be safe
- if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
- int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
- _reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
- (int)HeapWordSize;
- }
-#endif
-
// During jvm startup, the main thread is initialized
// before the heap is initialized. So reinitialize it now.
guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
@@ -454,8 +429,7 @@ void ThreadLocalAllocStats::publish() {
}
size_t ThreadLocalAllocBuffer::end_reserve() {
- size_t reserve_size = CollectedHeap::lab_alignment_reserve();
- return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
+ return CollectedHeap::lab_alignment_reserve();
}
const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const {
diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp
index 59979646395..b64fa8d6ad1 100644
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp
+++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp
@@ -58,7 +58,6 @@ private:
size_t _allocated_before_last_gc; // total bytes allocated up until the last gc
static size_t _max_size; // maximum size of any TLAB
- static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
static unsigned _target_refills; // expected number of refills between GCs
unsigned _number_of_refills;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
index 5cc9eab7b4b..a9c5ebe49de 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
@@ -106,9 +106,6 @@ private:
size_t _used[UIntNumPartitions];
size_t _available[UIntNumPartitions];
- // Measured in bytes.
- size_t _allocated_since_gc_start[UIntNumPartitions];
-
// Some notes:
// total_region_counts[p] is _capacity[p] / region_size_bytes
// retired_regions[p] is total_region_counts[p] - _region_counts[p]
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
index 3a99023eca4..34713898fc6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
@@ -378,24 +378,20 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
// evacuation phase) of young collections. This is never called
// during global collections during marking or update refs..
// 4. Every allocation under TAMS updates the object start array.
+#ifdef ASSERT
oop obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
-#ifdef ASSERT
-#define WALK_FORWARD_IN_BLOCK_START true
-#else
-#define WALK_FORWARD_IN_BLOCK_START false
-#endif // ASSERT
- while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
+ while (p + obj->size() < left) {
p += obj->size();
obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
// Check assumptions in previous block comment if this assert fires
- guarantee(false, "Should never need forward walk in block start");
+ fatal("Should never need forward walk in block start");
}
-#undef WALK_FORWARD_IN_BLOCK_START
assert(p <= left, "p should start at or before left end of card");
assert(p + obj->size() > left, "obj should end after left end of card");
+#endif // ASSERT
return p;
}
diff --git a/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp b/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
index d6d35ecddcd..f686bc78d15 100644
--- a/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
+++ b/src/hotspot/share/gc/z/zDeferredConstructed.inline.hpp
@@ -27,10 +27,9 @@
#include "gc/z/zDeferredConstructed.hpp"
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
-#include
-
template
inline ZDeferredConstructed::ZDeferredConstructed()
DEBUG_ONLY(: _initialized(false)) {
diff --git a/src/hotspot/share/include/jmm.h b/src/hotspot/share/include/jmm.h
index ba7ed3bbca5..ee1462fe5a8 100644
--- a/src/hotspot/share/include/jmm.h
+++ b/src/hotspot/share/include/jmm.h
@@ -53,7 +53,8 @@ enum {
JMM_VERSION_2 = 0x20020000, // JDK 10
JMM_VERSION_3 = 0x20030000, // JDK 14
JMM_VERSION_4 = 0x20040000, // JDK 21
- JMM_VERSION = JMM_VERSION_4
+ JMM_VERSION_5 = 0x20050000, // JDK 26
+ JMM_VERSION = JMM_VERSION_5
};
typedef struct {
@@ -81,6 +82,7 @@ typedef enum {
JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */
JMM_GC_COUNT = 10, /* Total number of collections */
JMM_JVM_UPTIME_MS = 11, /* The JVM uptime in milliseconds */
+ JMM_TOTAL_GC_CPU_TIME = 12, /* Total accumulated GC CPU time */
JMM_INTERNAL_ATTRIBUTE_INDEX = 100,
JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */
diff --git a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp
index cc5bbe1fc60..6a1146587bc 100644
--- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp
+++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp
@@ -421,7 +421,9 @@ JVM_END
JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
#ifdef LINUX
// We want the host swap memory, not the container value.
- return os::Linux::host_swap();
+ physical_memory_size_type host_swap = 0;
+ (void)os::Linux::host_swap(host_swap); // Discard return value and treat as no swap
+ return static_cast(host_swap);
#else
physical_memory_size_type total_swap_space = 0;
// Return value ignored - defaulting to 0 on failure.
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index 6d43123ae87..eaafef37306 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -1283,7 +1283,25 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp
index 35180fdba5e..963ca04aadf 100644
--- a/src/hotspot/share/memory/allocation.hpp
+++ b/src/hotspot/share/memory/allocation.hpp
@@ -25,14 +25,13 @@
#ifndef SHARE_MEMORY_ALLOCATION_HPP
#define SHARE_MEMORY_ALLOCATION_HPP
+#include "cppstdlib/new.hpp"
#include "memory/allStatic.hpp"
#include "nmt/memTag.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
-#include
-
class outputStream;
class Thread;
class JavaThread;
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index b9968083e0e..2de3f837c00 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -24,6 +24,7 @@
*/
#include "compiler/compilationMemoryStatistic.hpp"
+#include "cppstdlib/new.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/resourceArea.hpp"
diff --git a/src/hotspot/share/memory/arena.hpp b/src/hotspot/share/memory/arena.hpp
index b4a0546babf..a8450b5543a 100644
--- a/src/hotspot/share/memory/arena.hpp
+++ b/src/hotspot/share/memory/arena.hpp
@@ -31,8 +31,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
-#include
-
// The byte alignment to be used by Arena::Amalloc.
#define ARENA_AMALLOC_ALIGNMENT BytesPerLong
#define ARENA_ALIGN(x) (align_up((x), ARENA_AMALLOC_ALIGNMENT))
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index db0435044ca..d389fe81806 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -557,32 +557,32 @@ void Universe::genesis(TRAPS) {
void Universe::initialize_basic_type_mirrors(TRAPS) {
#if INCLUDE_CDS_JAVA_HEAP
- if (CDSConfig::is_using_archive() &&
- HeapShared::is_archived_heap_in_use() &&
- _basic_type_mirrors[T_INT].resolve() != nullptr) {
- // check that all basic type mirrors are mapped also
- for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
- if (!is_reference_type((BasicType)i)) {
- oop m = _basic_type_mirrors[i].resolve();
- assert(m != nullptr, "archived mirrors should not be null");
- }
+ if (CDSConfig::is_using_archive() &&
+ HeapShared::is_archived_heap_in_use() &&
+ _basic_type_mirrors[T_INT].resolve() != nullptr) {
+ // check that all basic type mirrors are mapped also
+ for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
+ if (!is_reference_type((BasicType)i)) {
+ oop m = _basic_type_mirrors[i].resolve();
+ assert(m != nullptr, "archived mirrors should not be null");
}
- } else
- // _basic_type_mirrors[T_INT], etc, are null if not using an archived heap
+ }
+ } else
+ // _basic_type_mirrors[T_INT], etc, are null if not using an archived heap
#endif
- {
- for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
- BasicType bt = (BasicType)i;
- if (!is_reference_type(bt)) {
- oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
- _basic_type_mirrors[i] = OopHandle(vm_global(), m);
- }
- CDS_JAVA_HEAP_ONLY(_archived_basic_type_mirror_indices[i] = -1);
+ {
+ for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
+ BasicType bt = (BasicType)i;
+ if (!is_reference_type(bt)) {
+ oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
+ _basic_type_mirrors[i] = OopHandle(vm_global(), m);
}
+ CDS_JAVA_HEAP_ONLY(_archived_basic_type_mirror_indices[i] = -1);
}
- if (CDSConfig::is_dumping_heap()) {
- HeapShared::init_scratch_objects_for_basic_type_mirrors(CHECK);
- }
+ }
+ if (CDSConfig::is_dumping_heap()) {
+ HeapShared::init_scratch_objects_for_basic_type_mirrors(CHECK);
+ }
}
void Universe::fixup_mirrors(TRAPS) {
diff --git a/src/hotspot/share/nmt/mallocTracker.cpp b/src/hotspot/share/nmt/mallocTracker.cpp
index a61a27db25d..2cf5034c0bf 100644
--- a/src/hotspot/share/nmt/mallocTracker.cpp
+++ b/src/hotspot/share/nmt/mallocTracker.cpp
@@ -45,7 +45,7 @@
#include "utilities/ostream.hpp"
#include "utilities/vmError.hpp"
-MallocMemorySnapshot MallocMemorySummary::_snapshot;
+DeferredStatic MallocMemorySummary::_snapshot;
void MemoryCounter::update_peak(size_t size, size_t cnt) {
size_t peak_sz = peak_size();
@@ -101,7 +101,7 @@ void MallocMemorySnapshot::make_adjustment() {
}
void MallocMemorySummary::initialize() {
- // Uses placement new operator to initialize static area.
+ _snapshot.initialize();
MallocLimitHandler::initialize(MallocLimit);
}
diff --git a/src/hotspot/share/nmt/mallocTracker.hpp b/src/hotspot/share/nmt/mallocTracker.hpp
index 0ead41f2411..fc03faf7212 100644
--- a/src/hotspot/share/nmt/mallocTracker.hpp
+++ b/src/hotspot/share/nmt/mallocTracker.hpp
@@ -30,6 +30,7 @@
#include "nmt/memTag.hpp"
#include "nmt/nmtCommon.hpp"
#include "runtime/atomicAccess.hpp"
+#include "utilities/deferredStatic.hpp"
#include "utilities/nativeCallStack.hpp"
class outputStream;
@@ -204,7 +205,7 @@ class MallocMemorySnapshot {
class MallocMemorySummary : AllStatic {
private:
// Reserve memory for placement of MallocMemorySnapshot object
- static MallocMemorySnapshot _snapshot;
+ static DeferredStatic _snapshot;
static bool _have_limits;
// Called when a total limit break was detected.
@@ -251,7 +252,7 @@ class MallocMemorySummary : AllStatic {
}
static MallocMemorySnapshot* as_snapshot() {
- return &_snapshot;
+ return _snapshot.get();
}
// MallocLimit: returns true if allocating s bytes on f would trigger
diff --git a/src/hotspot/share/nmt/memMapPrinter.cpp b/src/hotspot/share/nmt/memMapPrinter.cpp
index 7e82de23dd8..9a2fe166d3d 100644
--- a/src/hotspot/share/nmt/memMapPrinter.cpp
+++ b/src/hotspot/share/nmt/memMapPrinter.cpp
@@ -32,7 +32,6 @@
#include "memory/universe.hpp"
#include "nmt/memMapPrinter.hpp"
#include "nmt/memTag.hpp"
-#include "nmt/memTagBitmap.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/nonJavaThread.hpp"
@@ -40,6 +39,8 @@
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/permitForbiddenFunctions.hpp"
@@ -128,8 +129,8 @@ public:
}
// Given a vma [from, to), find all regions that intersect with this vma and
- // return their collective flags.
- MemTagBitmap lookup(const void* from, const void* to) const {
+ // fill out their collective flags into bm.
+ void lookup(const void* from, const void* to, ResourceBitMap& bm) const {
assert(from <= to, "Sanity");
// We optimize for sequential lookups. Since this class is used when a list
// of OS mappings is scanned (VirtualQuery, /proc/pid/maps), and these lists
@@ -138,16 +139,14 @@ public:
// the range is to the right of the given section, we need to re-start the search
_last = 0;
}
- MemTagBitmap bm;
for(uintx i = _last; i < _count; i++) {
if (range_intersects(from, to, _ranges[i].from, _ranges[i].to)) {
- bm.set_tag(_mem_tags[i]);
+ bm.set_bit((BitMap::idx_t)_mem_tags[i]);
} else if (to <= _ranges[i].from) {
_last = i;
break;
}
}
- return bm;
}
bool do_allocation_site(const ReservedMemoryRegion* rgn) override {
@@ -247,11 +246,13 @@ bool MappingPrintSession::print_nmt_info_for_region(const void* vma_from, const
// print NMT information, if available
if (MemTracker::enabled()) {
// Correlate vma region (from, to) with NMT region(s) we collected previously.
- const MemTagBitmap flags = _nmt_info.lookup(vma_from, vma_to);
- if (flags.has_any()) {
+ ResourceMark rm;
+ ResourceBitMap flags(mt_number_of_tags);
+ _nmt_info.lookup(vma_from, vma_to, flags);
+ if (!flags.is_empty()) {
for (int i = 0; i < mt_number_of_tags; i++) {
const MemTag mem_tag = (MemTag)i;
- if (flags.has_tag(mem_tag)) {
+ if (flags.at((BitMap::idx_t)mem_tag)) {
if (num_printed > 0) {
_out->put(',');
}
diff --git a/src/hotspot/share/nmt/memTagBitmap.hpp b/src/hotspot/share/nmt/memTagBitmap.hpp
deleted file mode 100644
index f65dce60fa6..00000000000
--- a/src/hotspot/share/nmt/memTagBitmap.hpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2023, 2024, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_NMT_MEMTAGBITMAP_HPP
-#define SHARE_NMT_MEMTAGBITMAP_HPP
-
-#include "nmt/memTag.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class MemTagBitmap {
- uint32_t _v;
- STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_tags);
-
-public:
- MemTagBitmap(uint32_t v = 0) : _v(v) {}
- MemTagBitmap(const MemTagBitmap& o) : _v(o._v) {}
-
- uint32_t raw_value() const { return _v; }
-
- void set_tag(MemTag mem_tag) {
- const int bitno = (int)mem_tag;
- _v |= nth_bit(bitno);
- }
-
- bool has_tag(MemTag mem_tag) const {
- const int bitno = (int)mem_tag;
- return _v & nth_bit(bitno);
- }
-
- bool has_any() const { return _v > 0; }
-};
-
-#endif // SHARE_NMT_MEMTAGBITMAP_HPP
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index 2d03b69ee92..24358f662bc 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -2870,7 +2870,7 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
}
bool InstanceKlass::can_be_verified_at_dumptime() const {
- if (AOTMetaspace::in_aot_cache(this)) {
+ if (CDSConfig::is_dumping_dynamic_archive() && AOTMetaspace::in_aot_cache(this)) {
// This is a class that was dumped into the base archive, so we know
// it was verified at dump time.
return true;
diff --git a/src/hotspot/share/oops/resolvedFieldEntry.cpp b/src/hotspot/share/oops/resolvedFieldEntry.cpp
index dd0a81ce0f3..83f1a6919a6 100644
--- a/src/hotspot/share/oops/resolvedFieldEntry.cpp
+++ b/src/hotspot/share/oops/resolvedFieldEntry.cpp
@@ -23,8 +23,17 @@
*/
#include "cds/archiveBuilder.hpp"
+#include "cppstdlib/type_traits.hpp"
#include "oops/resolvedFieldEntry.hpp"
+static_assert(std::is_trivially_copyable_v);
+
+// Detect inadvertently introduced trailing padding.
+class ResolvedFieldEntryWithExtra : public ResolvedFieldEntry {
+ u1 _extra_field;
+};
+static_assert(sizeof(ResolvedFieldEntryWithExtra) > sizeof(ResolvedFieldEntry));
+
void ResolvedFieldEntry::print_on(outputStream* st) const {
st->print_cr("Field Entry:");
@@ -45,9 +54,7 @@ void ResolvedFieldEntry::print_on(outputStream* st) const {
#if INCLUDE_CDS
void ResolvedFieldEntry::remove_unshareable_info() {
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
- _cpool_index = saved_cpool_index;
+ *this = ResolvedFieldEntry(_cpool_index);
}
void ResolvedFieldEntry::mark_and_relocate() {
diff --git a/src/hotspot/share/oops/resolvedFieldEntry.hpp b/src/hotspot/share/oops/resolvedFieldEntry.hpp
index 1df4ae8d956..77ad4815730 100644
--- a/src/hotspot/share/oops/resolvedFieldEntry.hpp
+++ b/src/hotspot/share/oops/resolvedFieldEntry.hpp
@@ -43,6 +43,9 @@
// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.
+// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
+// the C++ compiler from potentially inserting random values in unused gaps.
+
//class InstanceKlass;
class ResolvedFieldEntry {
friend class VMStructs;
@@ -54,17 +57,9 @@ class ResolvedFieldEntry {
u1 _tos_state; // TOS state
u1 _flags; // Flags: [0000|00|is_final|is_volatile]
u1 _get_code, _put_code; // Get and Put bytecodes of the field
-
- void copy_from(const ResolvedFieldEntry& other) {
- _field_holder = other._field_holder;
- _field_offset = other._field_offset;
- _field_index = other._field_index;
- _cpool_index = other._cpool_index;
- _tos_state = other._tos_state;
- _flags = other._flags;
- _get_code = other._get_code;
- _put_code = other._put_code;
- }
+#ifdef _LP64
+ u4 _padding;
+#endif
public:
ResolvedFieldEntry(u2 cpi) :
@@ -75,48 +70,15 @@ public:
_tos_state(0),
_flags(0),
_get_code(0),
- _put_code(0) {}
+ _put_code(0)
+#ifdef _LP64
+ , _padding(0)
+#endif
+ {}
ResolvedFieldEntry() :
ResolvedFieldEntry(0) {}
- // Notes on copy constructor, copy assignment operator, and copy_from().
- // These are necessary for generating deterministic CDS archives.
- //
- // We have some unused padding on 64-bit platforms (4 bytes at the tail end).
- //
- // When ResolvedFieldEntries in a ConstantPoolCache are allocated from the metaspace,
- // their entire content (including the padding) is filled with zeros. They are
- // then initialized with initialize_resolved_entries_array() in cpCache.cpp from a
- // GrowableArray.
- //
- // The GrowableArray is initialized in rewriter.cpp, using ResolvedFieldEntries that
- // are originally allocated from the C++ stack. Functions like GrowableArray::expand_to()
- // will also allocate ResolvedFieldEntries from the stack. These may have random bits
- // in the padding as the C++ compiler is allowed to leave the padding in uninitialized
- // states.
- //
- // If we use the default copy constructor and/or default copy assignment operator,
- // the random padding will be copied into the GrowableArray, from there
- // to the ConstantPoolCache, and eventually to the CDS archive. As a result, the
- // CDS archive will contain random bits, causing failures in
- // test/hotspot/jtreg/runtime/cds/DeterministicDump.java (usually on Windows).
- //
- // By using copy_from(), we can prevent the random padding from being copied,
- // ensuring that the ResolvedFieldEntries in a ConstantPoolCache (and thus the
- // CDS archive) will have all zeros in the padding.
-
- // Copy constructor
- ResolvedFieldEntry(const ResolvedFieldEntry& other) {
- copy_from(other);
- }
-
- // Copy assignment operator
- ResolvedFieldEntry& operator=(const ResolvedFieldEntry& other) {
- copy_from(other);
- return *this;
- }
-
// Bit shift to get flags
// Note: Only two flags exists at the moment but more could be added
enum {
diff --git a/src/hotspot/share/oops/resolvedMethodEntry.cpp b/src/hotspot/share/oops/resolvedMethodEntry.cpp
index 2dc533dbee0..bb96ca86012 100644
--- a/src/hotspot/share/oops/resolvedMethodEntry.cpp
+++ b/src/hotspot/share/oops/resolvedMethodEntry.cpp
@@ -23,9 +23,18 @@
*/
#include "cds/archiveBuilder.hpp"
+#include "cppstdlib/type_traits.hpp"
#include "oops/method.hpp"
#include "oops/resolvedMethodEntry.hpp"
+static_assert(std::is_trivially_copyable_v);
+
+// Detect inadvertently introduced trailing padding.
+class ResolvedMethodEntryWithExtra : public ResolvedMethodEntry {
+ u1 _extra_field;
+};
+static_assert(sizeof(ResolvedMethodEntryWithExtra) > sizeof(ResolvedMethodEntry));
+
bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
// return false if m refers to a non-deleted old or obsolete method
if (_method != nullptr) {
@@ -39,14 +48,10 @@ bool ResolvedMethodEntry::check_no_old_or_obsolete_entry() {
void ResolvedMethodEntry::reset_entry() {
if (has_resolved_references_index()) {
u2 saved_resolved_references_index = _entry_specific._resolved_references_index;
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
+ *this = ResolvedMethodEntry(_cpool_index);
set_resolved_references_index(saved_resolved_references_index);
- _cpool_index = saved_cpool_index;
} else {
- u2 saved_cpool_index = _cpool_index;
- memset(this, 0, sizeof(*this));
- _cpool_index = saved_cpool_index;
+ *this = ResolvedMethodEntry(_cpool_index);
}
}
diff --git a/src/hotspot/share/oops/resolvedMethodEntry.hpp b/src/hotspot/share/oops/resolvedMethodEntry.hpp
index c95efb751e9..802cf252a6d 100644
--- a/src/hotspot/share/oops/resolvedMethodEntry.hpp
+++ b/src/hotspot/share/oops/resolvedMethodEntry.hpp
@@ -61,6 +61,9 @@
// pool entry and thus the same resolved method entry.
// The is_vfinal flag indicates method pointer for a final method or an index.
+// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
+// the C++ compiler from potentially inserting random values in unused gaps.
+
class InstanceKlass;
class ResolvedMethodEntry {
friend class VMStructs;
@@ -70,6 +73,7 @@ class ResolvedMethodEntry {
InstanceKlass* _interface_klass; // for interface and static
u2 _resolved_references_index; // Index of resolved references array that holds the appendix oop for invokehandle
u2 _table_index; // vtable/itable index for virtual and interface calls
+ // The padding field is unused here, as the parent constructor zeroes the union.
} _entry_specific;
u2 _cpool_index; // Constant pool index
@@ -80,51 +84,36 @@ class ResolvedMethodEntry {
#ifdef ASSERT
bool _has_interface_klass;
bool _has_table_index;
+# ifdef _LP64
+ u2 _padding1;
+ u4 _padding2;
+# else
+ u1 _padding1;
+ u1 _padding2;
+# endif
#endif
- // See comments in resolvedFieldEntry.hpp about copy_from and padding.
- // We have unused padding on debug builds.
- void copy_from(const ResolvedMethodEntry& other) {
- _method = other._method;
- _entry_specific = other._entry_specific;
- _cpool_index = other._cpool_index;
- _number_of_parameters = other._number_of_parameters;
- _tos_state = other._tos_state;
- _flags = other._flags;
- _bytecode1 = other._bytecode1;
- _bytecode2 = other._bytecode2;
-#ifdef ASSERT
- _has_interface_klass = other._has_interface_klass;
- _has_table_index = other._has_table_index;
-#endif
- }
-
// Constructors
public:
ResolvedMethodEntry(u2 cpi) :
_method(nullptr),
+ _entry_specific{nullptr},
_cpool_index(cpi),
_number_of_parameters(0),
_tos_state(0),
_flags(0),
_bytecode1(0),
- _bytecode2(0) {
- _entry_specific._interface_klass = nullptr;
- DEBUG_ONLY(_has_interface_klass = false;)
- DEBUG_ONLY(_has_table_index = false;)
- }
+ _bytecode2(0)
+#ifdef ASSERT
+ , _has_interface_klass(false),
+ _has_table_index(false),
+ _padding1(0),
+ _padding2(0)
+#endif
+ {}
ResolvedMethodEntry() :
ResolvedMethodEntry(0) {}
- ResolvedMethodEntry(const ResolvedMethodEntry& other) {
- copy_from(other);
- }
-
- ResolvedMethodEntry& operator=(const ResolvedMethodEntry& other) {
- copy_from(other);
- return *this;
- }
-
// Bit shift to get flags
enum {
diff --git a/src/hotspot/share/opto/arraycopynode.cpp b/src/hotspot/share/opto/arraycopynode.cpp
index c02aefc7943..4ee6107fe54 100644
--- a/src/hotspot/share/opto/arraycopynode.cpp
+++ b/src/hotspot/share/opto/arraycopynode.cpp
@@ -28,8 +28,6 @@
#include "gc/shared/gc_globals.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
const TypeFunc* ArrayCopyNode::_arraycopy_type_Type = nullptr;
@@ -779,15 +777,17 @@ bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues
return false;
}
-// As an optimization, choose optimum vector size for copy length known at compile time.
-int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, int const_len) {
- int lane_count = ArrayOperationPartialInlineSize/type2aelembytes(type);
- if (const_len > 0) {
- int size_in_bytes = const_len * type2aelembytes(type);
- if (size_in_bytes <= 16)
- lane_count = 16/type2aelembytes(type);
- else if (size_in_bytes > 16 && size_in_bytes <= 32)
- lane_count = 32/type2aelembytes(type);
+// As an optimization, choose the optimal vector size for bounded copy length
+int ArrayCopyNode::get_partial_inline_vector_lane_count(BasicType type, jlong max_len) {
+ assert(max_len > 0, JLONG_FORMAT, max_len);
+ // We only care whether max_size_in_bytes is not larger than 32, we also want to avoid
+ // multiplication overflow, so clamp max_len to [0, 64]
+ int max_size_in_bytes = MIN2(max_len, 64) * type2aelembytes(type);
+ if (ArrayOperationPartialInlineSize > 16 && max_size_in_bytes <= 16) {
+ return 16 / type2aelembytes(type);
+ } else if (ArrayOperationPartialInlineSize > 32 && max_size_in_bytes <= 32) {
+ return 32 / type2aelembytes(type);
+ } else {
+ return ArrayOperationPartialInlineSize / type2aelembytes(type);
}
- return lane_count;
}
diff --git a/src/hotspot/share/opto/arraycopynode.hpp b/src/hotspot/share/opto/arraycopynode.hpp
index 13e739fc2c7..83c085fd5db 100644
--- a/src/hotspot/share/opto/arraycopynode.hpp
+++ b/src/hotspot/share/opto/arraycopynode.hpp
@@ -191,7 +191,7 @@ public:
static bool may_modify(const TypeOopPtr* t_oop, MemBarNode* mb, PhaseValues* phase, ArrayCopyNode*& ac);
- static int get_partial_inline_vector_lane_count(BasicType type, int const_len);
+ static int get_partial_inline_vector_lane_count(BasicType type, jlong max_len);
bool modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseValues* phase, bool must_modify) const;
diff --git a/src/hotspot/share/opto/c2_globals.hpp b/src/hotspot/share/opto/c2_globals.hpp
index 0a4f231c49b..2b2b4db47b1 100644
--- a/src/hotspot/share/opto/c2_globals.hpp
+++ b/src/hotspot/share/opto/c2_globals.hpp
@@ -428,7 +428,7 @@
"0=print nothing except PhasePrintLevel directives, " \
"6=all details printed. " \
"Level of detail of printouts can be set on a per-method level " \
- "as well by using CompileCommand=PrintPhaseLevel.") \
+ "as well by using CompileCommand=PhasePrintLevel.") \
range(-1, 6) \
\
develop(bool, PrintIdealGraph, false, \
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 6babc13e1b3..89b5e36b120 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -5233,7 +5233,7 @@ void Compile::end_method() {
#ifndef PRODUCT
bool Compile::should_print_phase(const int level) const {
- return PrintPhaseLevel > 0 && directive()->PhasePrintLevelOption >= level &&
+ return PrintPhaseLevel >= 0 && directive()->PhasePrintLevelOption >= level &&
_method != nullptr; // Do not print phases for stubs.
}
diff --git a/src/hotspot/share/opto/doCall.cpp b/src/hotspot/share/opto/doCall.cpp
index 754b0fa8d1c..91bb743618b 100644
--- a/src/hotspot/share/opto/doCall.cpp
+++ b/src/hotspot/share/opto/doCall.cpp
@@ -97,10 +97,9 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
Bytecodes::Code bytecode = caller->java_code_at_bci(bci);
ciMethod* orig_callee = caller->get_method_at_bci(bci);
- const bool is_virtual_or_interface = (bytecode == Bytecodes::_invokevirtual) ||
- (bytecode == Bytecodes::_invokeinterface) ||
- (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual) ||
- (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
+ const bool is_virtual = (bytecode == Bytecodes::_invokevirtual) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToVirtual);
+ const bool is_interface = (bytecode == Bytecodes::_invokeinterface) || (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface);
+ const bool is_virtual_or_interface = is_virtual || is_interface;
const bool check_access = !orig_callee->is_method_handle_intrinsic(); // method handle intrinsics don't perform access checks
@@ -339,17 +338,25 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
// number of implementors for decl_interface is 0 or 1. If
// it's 0 then no class implements decl_interface and there's
// no point in inlining.
- if (call_does_dispatch && bytecode == Bytecodes::_invokeinterface) {
- ciInstanceKlass* declared_interface =
- caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
+ if (call_does_dispatch && is_interface) {
+ ciInstanceKlass* declared_interface = nullptr;
+ if (orig_callee->intrinsic_id() == vmIntrinsics::_linkToInterface) {
+ // MemberName doesn't keep information about resolved interface class (REFC) once
+ // resolution is over, but resolved method holder (DECC) can be used as a
+ // conservative approximation.
+ declared_interface = callee->holder();
+ } else {
+ assert(!orig_callee->is_method_handle_intrinsic(), "not allowed");
+ declared_interface = caller->get_declared_method_holder_at_bci(bci)->as_instance_klass();
+ }
+ assert(declared_interface->is_interface(), "required");
ciInstanceKlass* singleton = declared_interface->unique_implementor();
if (singleton != nullptr) {
assert(singleton != declared_interface, "not a unique implementor");
- assert(check_access, "required");
ciMethod* cha_monomorphic_target =
- callee->find_monomorphic_target(caller->holder(), declared_interface, singleton);
+ callee->find_monomorphic_target(caller->holder(), declared_interface, singleton, check_access);
if (cha_monomorphic_target != nullptr &&
cha_monomorphic_target->holder() != env()->Object_klass()) { // subtype check against Object is useless
@@ -372,7 +379,7 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
}
}
}
- } // call_does_dispatch && bytecode == Bytecodes::_invokeinterface
+ } // call_does_dispatch && is_interface
// Nothing claimed the intrinsic, we go with straight-forward inlining
// for already discovered intrinsic.
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index 90602bc2b35..6f2171bbd75 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -1914,8 +1914,7 @@ Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
transform_later(cache_adr);
cache_adr = new CastP2XNode(needgc_false, cache_adr);
transform_later(cache_adr);
- // Address is aligned to execute prefetch to the beginning of cache line size
- // (it is important when BIS instruction is used on SPARC as prefetch).
+ // Address is aligned to execute prefetch to the beginning of cache line size.
Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1));
cache_adr = new AndXNode(cache_adr, mask);
transform_later(cache_adr);
diff --git a/src/hotspot/share/opto/macroArrayCopy.cpp b/src/hotspot/share/opto/macroArrayCopy.cpp
index 10de940c0c2..0ba8ed40c37 100644
--- a/src/hotspot/share/opto/macroArrayCopy.cpp
+++ b/src/hotspot/share/opto/macroArrayCopy.cpp
@@ -204,53 +204,46 @@ void PhaseMacroExpand::generate_limit_guard(Node** ctrl, Node* offset, Node* sub
void PhaseMacroExpand::generate_partial_inlining_block(Node** ctrl, MergeMemNode** mem, const TypePtr* adr_type,
RegionNode** exit_block, Node** result_memory, Node* length,
Node* src_start, Node* dst_start, BasicType type) {
- const TypePtr *src_adr_type = _igvn.type(src_start)->isa_ptr();
- Node* inline_block = nullptr;
- Node* stub_block = nullptr;
+ int inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(type);
- int const_len = -1;
- const TypeInt* lty = nullptr;
- uint shift = exact_log2(type2aelembytes(type));
- if (length->Opcode() == Op_ConvI2L) {
- lty = _igvn.type(length->in(1))->isa_int();
- } else {
- lty = _igvn.type(length)->isa_int();
- }
- if (lty && lty->is_con()) {
- const_len = lty->get_con() << shift;
+ const TypeLong* length_type = _igvn.type(length)->isa_long();
+ if (length_type == nullptr) {
+ assert(_igvn.type(length) == Type::TOP, "");
+ return;
}
- // Return if copy length is greater than partial inline size limit or
- // target does not supports masked load/stores.
- int lane_count = ArrayCopyNode::get_partial_inline_vector_lane_count(type, const_len);
- if ( const_len > ArrayOperationPartialInlineSize ||
- !Matcher::match_rule_supported_vector(Op_LoadVectorMasked, lane_count, type) ||
+ const TypeLong* inline_range = TypeLong::make(0, inline_limit, Type::WidenMin);
+ if (length_type->join(inline_range) == Type::TOP) {
+ // The ranges do not intersect, the inline check will surely fail
+ return;
+ }
+
+ // Return if the target does not supports masked load/stores.
+ int lane_count = ArrayCopyNode::get_partial_inline_vector_lane_count(type, length_type->_hi);
+ if (!Matcher::match_rule_supported_vector(Op_LoadVectorMasked, lane_count, type) ||
!Matcher::match_rule_supported_vector(Op_StoreVectorMasked, lane_count, type) ||
!Matcher::match_rule_supported_vector(Op_VectorMaskGen, lane_count, type)) {
return;
}
- int inline_limit = ArrayOperationPartialInlineSize / type2aelembytes(type);
- Node* casted_length = new CastLLNode(*ctrl, length, TypeLong::make(0, inline_limit, Type::WidenMin));
- transform_later(casted_length);
- Node* copy_bytes = new LShiftXNode(length, intcon(shift));
- transform_later(copy_bytes);
-
- Node* cmp_le = new CmpULNode(copy_bytes, longcon(ArrayOperationPartialInlineSize));
+ Node* cmp_le = new CmpULNode(length, longcon(inline_limit));
transform_later(cmp_le);
Node* bol_le = new BoolNode(cmp_le, BoolTest::le);
transform_later(bol_le);
- inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
- stub_block = *ctrl;
+ Node* inline_block = generate_guard(ctrl, bol_le, nullptr, PROB_FAIR);
+ Node* stub_block = *ctrl;
+ Node* casted_length = new CastLLNode(inline_block, length, inline_range, ConstraintCastNode::RegularDependency);
+ transform_later(casted_length);
Node* mask_gen = VectorMaskGenNode::make(casted_length, type);
transform_later(mask_gen);
- unsigned vec_size = lane_count * type2aelembytes(type);
+ unsigned vec_size = lane_count * type2aelembytes(type);
if (C->max_vector_size() < vec_size) {
C->set_max_vector_size(vec_size);
}
+ const TypePtr* src_adr_type = _igvn.type(src_start)->isa_ptr();
const TypeVect * vt = TypeVect::make(type, lane_count);
Node* mm = (*mem)->memory_at(C->get_alias_index(src_adr_type));
Node* masked_load = new LoadVectorMaskedNode(inline_block, mm, src_start,
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index f42a6ea9489..61300ab4fcb 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -2012,11 +2012,7 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
assert(off != Type::OffsetTop, "case covered by TypePtr::empty");
Compile* C = phase->C;
- // If we are loading from a freshly-allocated object, produce a zero,
- // if the load is provably beyond the header of the object.
- // (Also allow a variable load from a fresh array to produce zero.)
- const TypeOopPtr* tinst = tp->isa_oopptr();
- bool is_instance = (tinst != nullptr) && tinst->is_known_instance_field();
+ // If load can see a previous constant store, use that.
Node* value = can_see_stored_value(mem, phase);
if (value != nullptr && value->is_Con()) {
assert(value->bottom_type()->higher_equal(_type), "sanity");
@@ -2227,13 +2223,16 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
}
}
- bool is_vect = (_type->isa_vect() != nullptr);
- if (is_instance && !is_vect) {
- // If we have an instance type and our memory input is the
- // programs's initial memory state, there is no matching store,
- // so just return a zero of the appropriate type -
- // except if it is vectorized - then we have no zero constant.
- Node *mem = in(MemNode::Memory);
+ // If we are loading from a freshly-allocated object/array, produce a zero.
+ // Things to check:
+ // 1. Load is beyond the header: headers are not guaranteed to be zero
+ // 2. Load is not vectorized: vectors have no zero constant
+ // 3. Load has no matching store, i.e. the input is the initial memory state
+ const TypeOopPtr* tinst = tp->isa_oopptr();
+ bool is_not_header = (tinst != nullptr) && tinst->is_known_instance_field();
+ bool is_not_vect = (_type->isa_vect() == nullptr);
+ if (is_not_header && is_not_vect) {
+ Node* mem = in(MemNode::Memory);
if (mem->is_Parm() && mem->in(0)->is_Start()) {
assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm");
return Type::get_zero_type(_type->basic_type());
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 93ded36363e..2452677caf3 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -1209,9 +1209,12 @@ bool Node::has_special_unique_user() const {
if (this->is_Store()) {
// Condition for back-to-back stores folding.
return n->Opcode() == op && n->in(MemNode::Memory) == this;
- } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
+ } else if ((this->is_Load() || this->is_DecodeN() || this->is_Phi()) && n->Opcode() == Op_MemBarAcquire) {
// Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
- return n->Opcode() == Op_MemBarAcquire;
+ return true;
+ } else if (this->is_Load() && n->is_Move()) {
+ // Condition for MoveX2Y (LoadX mem) => LoadY mem
+ return true;
} else if (op == Op_AddL) {
// Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
return n->Opcode() == Op_ConvL2I && n->in(1) == this;
diff --git a/src/hotspot/share/opto/vtransform.cpp b/src/hotspot/share/opto/vtransform.cpp
index e775eb60cab..b437d2e6eac 100644
--- a/src/hotspot/share/opto/vtransform.cpp
+++ b/src/hotspot/share/opto/vtransform.cpp
@@ -1252,7 +1252,6 @@ bool VTransformReductionVectorNode::optimize_move_non_strict_order_reductions_ou
// back to the phi. Check that all non strict order reductions only have a single
// use, except for the last (last_red), which only has phi as a use in the loop,
// and all other uses are outside the loop.
- VTransformReductionVectorNode* first_red = this;
VTransformReductionVectorNode* last_red = phi->in_req(2)->isa_ReductionVector();
VTransformReductionVectorNode* current_red = last_red;
while (true) {
@@ -1264,7 +1263,11 @@ bool VTransformReductionVectorNode::optimize_move_non_strict_order_reductions_ou
tty->print(" Cannot move out of loop, other reduction node does not match:");
print();
tty->print(" other: ");
- current_red->print();
+ if (current_red != nullptr) {
+ current_red->print();
+ } else {
+ tty->print("nullptr");
+ }
)
return false; // not compatible
}
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index 5af8edbb758..2297ce9b790 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -1867,6 +1867,32 @@ address jni_GetDoubleField_addr() {
return (address)jni_GetDoubleField;
}
+static void log_debug_if_final_static_field(JavaThread* current, const char* func_name, InstanceKlass* ik, int offset) {
+ if (log_is_enabled(Debug, jni)) {
+ fieldDescriptor fd;
+ bool found = ik->find_field_from_offset(offset, true, &fd);
+ assert(found, "bad field offset");
+ assert(fd.is_static(), "static/instance mismatch");
+ if (fd.is_final() && !fd.is_mutable_static_final()) {
+ ResourceMark rm(current);
+ log_debug(jni)("%s mutated final static field %s.%s", func_name, ik->external_name(), fd.name()->as_C_string());
+ }
+ }
+}
+
+static void log_debug_if_final_instance_field(JavaThread* current, const char* func_name, InstanceKlass* ik, int offset) {
+ if (log_is_enabled(Debug, jni)) {
+ fieldDescriptor fd;
+ bool found = ik->find_field_from_offset(offset, false, &fd);
+ assert(found, "bad field offset");
+ assert(!fd.is_static(), "static/instance mismatch");
+ if (fd.is_final()) {
+ ResourceMark rm(current);
+ log_debug(jni)("%s mutated final instance field %s.%s", func_name, ik->external_name(), fd.name()->as_C_string());
+ }
+ }
+}
+
JNI_ENTRY_NO_PRESERVE(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID, jobject value))
HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID, value);
oop o = JNIHandles::resolve_non_null(obj);
@@ -1879,6 +1905,7 @@ JNI_ENTRY_NO_PRESERVE(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldI
o = JvmtiExport::jni_SetField_probe(thread, obj, o, k, fieldID, false, JVM_SIGNATURE_CLASS, (jvalue *)&field_value);
}
HeapAccess<ON_UNKNOWN_OOP_REF>::oop_store_at(o, offset, JNIHandles::resolve(value));
+ log_debug_if_final_instance_field(thread, "SetObjectField", InstanceKlass::cast(k), offset);
HOTSPOT_JNI_SETOBJECTFIELD_RETURN();
JNI_END
@@ -1901,6 +1928,7 @@ JNI_ENTRY_NO_PRESERVE(void, jni_Set##Result##Field(JNIEnv *env, jobject obj, jfi
o = JvmtiExport::jni_SetField_probe(thread, obj, o, k, fieldID, false, SigType, (jvalue *)&field_value); \
} \
o->Fieldname##_field_put(offset, value); \
+ log_debug_if_final_instance_field(thread, "SetField", InstanceKlass::cast(k), offset); \
ReturnProbe; \
JNI_END
@@ -2072,6 +2100,7 @@ JNI_ENTRY(void, jni_SetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fie
JvmtiExport::jni_SetField_probe(thread, nullptr, nullptr, id->holder(), fieldID, true, JVM_SIGNATURE_CLASS, (jvalue *)&field_value);
}
id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value));
+ log_debug_if_final_static_field(THREAD, "SetStaticObjectField", id->holder(), id->offset());
HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN();
JNI_END
@@ -2093,6 +2122,7 @@ JNI_ENTRY(void, jni_SetStatic##Result##Field(JNIEnv *env, jclass clazz, jfieldID
JvmtiExport::jni_SetField_probe(thread, nullptr, nullptr, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \
} \
id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \
+ log_debug_if_final_static_field(THREAD, "SetStaticField", id->holder(), id->offset()); \
ReturnProbe;\
JNI_END
diff --git a/src/hotspot/share/prims/jniCheck.cpp b/src/hotspot/share/prims/jniCheck.cpp
index 43cc61d7363..5f4cf10ebf4 100644
--- a/src/hotspot/share/prims/jniCheck.cpp
+++ b/src/hotspot/share/prims/jniCheck.cpp
@@ -233,7 +233,7 @@ functionExit(JavaThread* thr)
}
static inline void
-checkStaticFieldID(JavaThread* thr, jfieldID fid, jclass cls, int ftype)
+checkStaticFieldID(JavaThread* thr, jfieldID fid, jclass cls, int ftype, bool setter)
{
fieldDescriptor fd;
@@ -258,10 +258,18 @@ checkStaticFieldID(JavaThread* thr, jfieldID fid, jclass cls, int ftype)
!(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) {
ReportJNIFatalError(thr, fatal_static_field_mismatch);
}
+
+ /* check if setting a final field */
+ if (setter && fd.is_final() && !fd.is_mutable_static_final()) {
+ ResourceMark rm(thr);
+ stringStream ss;
+ ss.print("SetStaticField called to mutate final static field %s.%s", k_oop->external_name(), fd.name()->as_C_string());
+ ReportJNIWarning(thr, ss.as_string());
+ }
}
static inline void
-checkInstanceFieldID(JavaThread* thr, jfieldID fid, jobject obj, int ftype)
+checkInstanceFieldID(JavaThread* thr, jfieldID fid, jobject obj, int ftype, bool setter)
{
fieldDescriptor fd;
@@ -287,14 +295,21 @@ checkInstanceFieldID(JavaThread* thr, jfieldID fid, jobject obj, int ftype)
ReportJNIFatalError(thr, fatal_wrong_field);
/* check for proper field type */
- if (!InstanceKlass::cast(k_oop)->find_field_from_offset(offset,
- false, &fd))
+ if (!InstanceKlass::cast(k_oop)->find_field_from_offset(offset, false, &fd))
ReportJNIFatalError(thr, fatal_instance_field_not_found);
if ((fd.field_type() != ftype) &&
!(fd.field_type() == T_ARRAY && ftype == T_OBJECT)) {
ReportJNIFatalError(thr, fatal_instance_field_mismatch);
}
+
+ /* check if setting a final field */
+ if (setter && fd.is_final()) {
+ ResourceMark rm(thr);
+ stringStream ss;
+ ss.print("SetField called to mutate final instance field %s.%s", k_oop->external_name(), fd.name()->as_C_string());
+ ReportJNIWarning(thr, ss.as_string());
+ }
}
static inline void
@@ -1204,7 +1219,7 @@ JNI_ENTRY_CHECKED(ReturnType, \
jfieldID fieldID)) \
functionEnter(thr); \
IN_VM( \
- checkInstanceFieldID(thr, fieldID, obj, FieldType); \
+ checkInstanceFieldID(thr, fieldID, obj, FieldType, false); \
) \
ReturnType result = UNCHECKED()->Get##Result##Field(env,obj,fieldID); \
functionExit(thr); \
@@ -1229,7 +1244,7 @@ JNI_ENTRY_CHECKED(void, \
ValueType val)) \
functionEnter(thr); \
IN_VM( \
- checkInstanceFieldID(thr, fieldID, obj, FieldType); \
+ checkInstanceFieldID(thr, fieldID, obj, FieldType, true); \
) \
UNCHECKED()->Set##Result##Field(env,obj,fieldID,val); \
functionExit(thr); \
@@ -1395,7 +1410,7 @@ JNI_ENTRY_CHECKED(ReturnType, \
functionEnter(thr); \
IN_VM( \
jniCheck::validate_class(thr, clazz, false); \
- checkStaticFieldID(thr, fieldID, clazz, FieldType); \
+ checkStaticFieldID(thr, fieldID, clazz, FieldType, false); \
) \
ReturnType result = UNCHECKED()->GetStatic##Result##Field(env, \
clazz, \
@@ -1423,7 +1438,7 @@ JNI_ENTRY_CHECKED(void, \
functionEnter(thr); \
IN_VM( \
jniCheck::validate_class(thr, clazz, false); \
- checkStaticFieldID(thr, fieldID, clazz, FieldType); \
+ checkStaticFieldID(thr, fieldID, clazz, FieldType, true); \
) \
UNCHECKED()->SetStatic##Result##Field(env,clazz,fieldID,value); \
functionExit(thr); \
diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp
index a69c7cb7142..c923f91f69d 100644
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp
@@ -2190,6 +2190,39 @@ class SimpleRootsClosure : public OopClosure {
virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
};
+// A supporting closure used to process ClassLoaderData roots.
+class CLDRootsClosure: public OopClosure {
+private:
+ bool _continue;
+public:
+ CLDRootsClosure(): _continue(true) {}
+
+ inline bool stopped() {
+ return !_continue;
+ }
+
+ void do_oop(oop* obj_p) {
+ if (stopped()) {
+ return;
+ }
+
+ oop o = NativeAccess<AS_NO_KEEPALIVE>::oop_load(obj_p);
+ // ignore null
+ if (o == nullptr) {
+ return;
+ }
+
+ jvmtiHeapReferenceKind kind = JVMTI_HEAP_REFERENCE_OTHER;
+ if (o->klass() == vmClasses::Class_klass()) {
+ kind = JVMTI_HEAP_REFERENCE_SYSTEM_CLASS;
+ }
+
+ // invoke the callback
+ _continue = CallbackInvoker::report_simple_root(kind, o);
+ }
+ virtual void do_oop(narrowOop* obj_p) { ShouldNotReachHere(); }
+};
+
// A supporting closure used to process JNI locals
class JNILocalRootsClosure : public OopClosure {
private:
@@ -2776,10 +2809,10 @@ inline bool VM_HeapWalkOperation::collect_simple_roots() {
}
// Preloaded classes and loader from the system dictionary
- blk.set_kind(JVMTI_HEAP_REFERENCE_SYSTEM_CLASS);
- CLDToOopClosure cld_closure(&blk, ClassLoaderData::_claim_none);
+ CLDRootsClosure cld_roots_closure;
+ CLDToOopClosure cld_closure(&cld_roots_closure, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
- if (blk.stopped()) {
+ if (cld_roots_closure.stopped()) {
return false;
}
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 8f0a2320288..5514f7d3260 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -2590,7 +2590,13 @@ WB_END
// Physical swap of the host machine (including containers), Linux only.
WB_ENTRY(jlong, WB_HostPhysicalSwap(JNIEnv* env, jobject o))
- LINUX_ONLY(return (jlong)os::Linux::host_swap();)
+#ifdef LINUX
+ physical_memory_size_type swap_val = 0;
+ if (!os::Linux::host_swap(swap_val)) {
+ return -1; // treat as unlimited
+ }
+ return static_cast<jlong>(swap_val);
+#endif
return -1; // Not used/implemented on other platforms
WB_END
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 79027cf113f..55ee7641a5f 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -317,6 +317,10 @@ bool needs_module_property_warning = false;
#define ENABLE_NATIVE_ACCESS_LEN 20
#define ILLEGAL_NATIVE_ACCESS "illegal.native.access"
#define ILLEGAL_NATIVE_ACCESS_LEN 21
+#define ENABLE_FINAL_FIELD_MUTATION "enable.final.field.mutation"
+#define ENABLE_FINAL_FIELD_MUTATION_LEN 27
+#define ILLEGAL_FINAL_FIELD_MUTATION "illegal.final.field.mutation"
+#define ILLEGAL_FINAL_FIELD_MUTATION_LEN 28
// Return TRUE if option matches 'property', or 'property=', or 'property.'.
static bool matches_property_suffix(const char* option, const char* property, size_t len) {
@@ -343,7 +347,9 @@ bool Arguments::internal_module_property_helper(const char* property, bool check
if (matches_property_suffix(property_suffix, PATCH, PATCH_LEN) ||
matches_property_suffix(property_suffix, LIMITMODS, LIMITMODS_LEN) ||
matches_property_suffix(property_suffix, UPGRADE_PATH, UPGRADE_PATH_LEN) ||
- matches_property_suffix(property_suffix, ILLEGAL_NATIVE_ACCESS, ILLEGAL_NATIVE_ACCESS_LEN)) {
+ matches_property_suffix(property_suffix, ILLEGAL_NATIVE_ACCESS, ILLEGAL_NATIVE_ACCESS_LEN) ||
+ matches_property_suffix(property_suffix, ENABLE_FINAL_FIELD_MUTATION, ENABLE_FINAL_FIELD_MUTATION_LEN) ||
+ matches_property_suffix(property_suffix, ILLEGAL_FINAL_FIELD_MUTATION, ILLEGAL_FINAL_FIELD_MUTATION_LEN)) {
return true;
}
@@ -530,7 +536,7 @@ static SpecialFlag const special_jvm_flags[] = {
{ "UseSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
{ "LockingMode", JDK_Version::jdk(24), JDK_Version::jdk(26), JDK_Version::jdk(27) },
#ifdef _LP64
- { "UseCompressedClassPointers", JDK_Version::jdk(25), JDK_Version::jdk(26), JDK_Version::undefined() },
+ { "UseCompressedClassPointers", JDK_Version::jdk(25), JDK_Version::jdk(27), JDK_Version::undefined() },
#endif
{ "ParallelRefProcEnabled", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) },
{ "ParallelRefProcBalancingEnabled", JDK_Version::jdk(26), JDK_Version::jdk(27), JDK_Version::jdk(28) },
@@ -1809,6 +1815,7 @@ static unsigned int addexports_count = 0;
static unsigned int addopens_count = 0;
static unsigned int patch_mod_count = 0;
static unsigned int enable_native_access_count = 0;
+static unsigned int enable_final_field_mutation = 0;
static bool patch_mod_javabase = false;
// Check the consistency of vm_init_args
@@ -2273,6 +2280,19 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, JVMFlagOrigin
if (res != JNI_OK) {
return res;
}
+ } else if (match_option(option, "--enable-final-field-mutation=", &tail)) {
+ if (!create_numbered_module_property("jdk.module.enable.final.field.mutation", tail, enable_final_field_mutation++)) {
+ return JNI_ENOMEM;
+ }
+ } else if (match_option(option, "--illegal-final-field-mutation=", &tail)) {
+ if (strcmp(tail, "allow") == 0 || strcmp(tail, "warn") == 0 || strcmp(tail, "debug") == 0 || strcmp(tail, "deny") == 0) {
+ PropertyList_unique_add(&_system_properties, "jdk.module.illegal.final.field.mutation", tail,
+ AddProperty, WriteableProperty, InternalProperty);
+ } else {
+ jio_fprintf(defaultStream::error_stream(),
+ "Value specified to --illegal-final-field-mutation not recognized: '%s'\n", tail);
+ return JNI_ERR;
+ }
} else if (match_option(option, "--sun-misc-unsafe-memory-access=", &tail)) {
if (strcmp(tail, "allow") == 0 || strcmp(tail, "warn") == 0 || strcmp(tail, "debug") == 0 || strcmp(tail, "deny") == 0) {
PropertyList_unique_add(&_system_properties, "sun.misc.unsafe.memory.access", tail,
@@ -2463,6 +2483,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, JVMFlagOrigin
}
} else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) {
+ if (match_option(option, "-Xmaxjitcodesize", &tail)) {
+ warning("Option -Xmaxjitcodesize was deprecated in JDK 26 and will likely be removed in a future release.");
+ }
julong long_ReservedCodeCacheSize = 0;
ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1);
diff --git a/src/hotspot/share/runtime/fieldDescriptor.cpp b/src/hotspot/share/runtime/fieldDescriptor.cpp
index c5c3bdbd4bc..491157d5bf7 100644
--- a/src/hotspot/share/runtime/fieldDescriptor.cpp
+++ b/src/hotspot/share/runtime/fieldDescriptor.cpp
@@ -46,6 +46,16 @@ bool fieldDescriptor::is_trusted_final() const {
return is_final() && (is_static() || ik->is_hidden() || ik->is_record());
}
+bool fieldDescriptor::is_mutable_static_final() const {
+ InstanceKlass* ik = field_holder();
+ // write protected fields (JLS 17.5.4)
+ if (is_final() && is_static() && ik == vmClasses::System_klass() &&
+ (offset() == java_lang_System::in_offset() || offset() == java_lang_System::out_offset() || offset() == java_lang_System::err_offset())) {
+ return true;
+ }
+ return false;
+}
+
AnnotationArray* fieldDescriptor::annotations() const {
InstanceKlass* ik = field_holder();
Array<AnnotationArray*>* md = ik->fields_annotations();
diff --git a/src/hotspot/share/runtime/fieldDescriptor.hpp b/src/hotspot/share/runtime/fieldDescriptor.hpp
index aae789b1fb7..fa3d1b9d23c 100644
--- a/src/hotspot/share/runtime/fieldDescriptor.hpp
+++ b/src/hotspot/share/runtime/fieldDescriptor.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,6 +98,8 @@ class fieldDescriptor {
bool is_trusted_final() const;
+ bool is_mutable_static_final() const;
+
inline void set_is_field_access_watched(const bool value);
inline void set_is_field_modification_watched(const bool value);
inline void set_has_initialized_final_update(const bool value);
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 674b0a55841..ceff9b54c33 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -2207,13 +2207,7 @@ static void assert_nonempty_range(const char* addr, size_t bytes) {
bool os::used_memory(physical_memory_size_type& value) {
#ifdef LINUX
if (OSContainer::is_containerized()) {
- jlong mem_usage = OSContainer::memory_usage_in_bytes();
- if (mem_usage > 0) {
- value = static_cast<physical_memory_size_type>(mem_usage);
- return true;
- } else {
- return false;
- }
+ return OSContainer::memory_usage_in_bytes(value);
}
#endif
physical_memory_size_type avail_mem = 0;
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index a7342448522..a75e67e9b56 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -350,7 +350,6 @@
nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \
nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \
nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \
- static_field(ThreadLocalAllocBuffer, _reserve_for_allocation_prefetch, int) \
static_field(ThreadLocalAllocBuffer, _target_refills, unsigned) \
nonstatic_field(ThreadLocalAllocBuffer, _number_of_refills, unsigned) \
nonstatic_field(ThreadLocalAllocBuffer, _refill_waste, unsigned) \
diff --git a/src/hotspot/share/services/cpuTimeUsage.cpp b/src/hotspot/share/services/cpuTimeUsage.cpp
index 27b5e90fbaf..0c7ecfdb655 100644
--- a/src/hotspot/share/services/cpuTimeUsage.cpp
+++ b/src/hotspot/share/services/cpuTimeUsage.cpp
@@ -36,6 +36,7 @@
volatile bool CPUTimeUsage::Error::_has_error = false;
static inline jlong thread_cpu_time_or_zero(Thread* thread) {
+ assert(!Universe::is_shutting_down(), "Should not query during shutdown");
jlong cpu_time = os::thread_cpu_time(thread);
if (cpu_time == -1) {
CPUTimeUsage::Error::mark_error();
diff --git a/src/hotspot/share/services/management.cpp b/src/hotspot/share/services/management.cpp
index cfe13d0c8f1..cc26e2e1352 100644
--- a/src/hotspot/share/services/management.cpp
+++ b/src/hotspot/share/services/management.cpp
@@ -54,6 +54,7 @@
#include "runtime/threadSMR.hpp"
#include "runtime/vmOperations.hpp"
#include "services/classLoadingService.hpp"
+#include "services/cpuTimeUsage.hpp"
#include "services/diagnosticCommand.hpp"
#include "services/diagnosticFramework.hpp"
#include "services/finalizerService.hpp"
@@ -889,6 +890,21 @@ static jint get_num_flags() {
return count;
}
+static jlong get_gc_cpu_time() {
+ if (!os::is_thread_cpu_time_supported()) {
+ return -1;
+ }
+
+ {
+ MutexLocker hl(Heap_lock);
+ if (Universe::heap()->is_shutting_down()) {
+ return -1;
+ }
+
+ return CPUTimeUsage::GC::total();
+ }
+}
+
static jlong get_long_attribute(jmmLongAttribute att) {
switch (att) {
case JMM_CLASS_LOADED_COUNT:
@@ -915,6 +931,9 @@ static jlong get_long_attribute(jmmLongAttribute att) {
case JMM_JVM_UPTIME_MS:
return Management::ticks_to_ms(os::elapsed_counter());
+ case JMM_TOTAL_GC_CPU_TIME:
+ return get_gc_cpu_time();
+
case JMM_COMPILE_TOTAL_TIME_MS:
return Management::ticks_to_ms(CompileBroker::total_compilation_ticks());
diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp
index 89c0a1ebc08..de39fe32dc1 100644
--- a/src/hotspot/share/utilities/debug.cpp
+++ b/src/hotspot/share/utilities/debug.cpp
@@ -29,6 +29,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
+#include "cppstdlib/new.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -63,7 +64,6 @@
#include "utilities/unsigned5.hpp"
#include "utilities/vmError.hpp"
-#include <new>
#include
#include
diff --git a/src/hotspot/share/utilities/deferredStatic.hpp b/src/hotspot/share/utilities/deferredStatic.hpp
index 56bdb9b8e6b..3a32f920fe8 100644
--- a/src/hotspot/share/utilities/deferredStatic.hpp
+++ b/src/hotspot/share/utilities/deferredStatic.hpp
@@ -25,11 +25,10 @@
#ifndef SHARE_UTILITIES_DEFERREDSTATIC_HPP
#define SHARE_UTILITIES_DEFERREDSTATIC_HPP
+#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "utilities/globalDefinitions.hpp"
-#include <new>
-
// The purpose of this class is to provide control over the initialization
// time for an object of type T with static storage duration. An instance of
// this class provides storage for an object, sized and aligned for T. The
diff --git a/src/hotspot/share/utilities/elfFile.cpp b/src/hotspot/share/utilities/elfFile.cpp
index 9ea19b38276..0b7713e9ca9 100644
--- a/src/hotspot/share/utilities/elfFile.cpp
+++ b/src/hotspot/share/utilities/elfFile.cpp
@@ -25,6 +25,7 @@
#if !defined(_WINDOWS) && !defined(__APPLE__)
+#include "cppstdlib/new.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
@@ -37,7 +38,6 @@
#include "utilities/ostream.hpp"
#include
-#include <new>
#include
#include
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 1910759b434..3284fd3bd15 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1386,4 +1386,25 @@ template inline constexpr bool DependentAlwaysFalse = false;
// handled.
bool IEEE_subnormal_handling_OK();
+//----------------------------------------------------------------------------------------------------
+// Forbid using the global allocator by HotSpot code.
+//
+// This is a subset of allocator and deallocator functions. These are
+// implicitly declared in all translation units, without needing to include
+// <new>; see C++17 6.7.4. This isn't even the full set of those; implicit
+// declarations involving std::align_val_t are not covered here, since that
+// type is defined in <new>. A translation unit that doesn't include <new> is
+// still likely to include this file. See cppstdlib/new.hpp for more details.
+#ifndef HOTSPOT_GTEST
+
+[[deprecated]] void* operator new(std::size_t);
+[[deprecated]] void operator delete(void*) noexcept;
+[[deprecated]] void operator delete(void*, std::size_t) noexcept;
+
+[[deprecated]] void* operator new[](std::size_t);
+[[deprecated]] void operator delete[](void*) noexcept;
+[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
+
+#endif // HOTSPOT_GTEST
+
#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
diff --git a/src/hotspot/share/utilities/lockFreeStack.hpp b/src/hotspot/share/utilities/lockFreeStack.hpp
index 43bc58fbc44..3f63482a268 100644
--- a/src/hotspot/share/utilities/lockFreeStack.hpp
+++ b/src/hotspot/share/utilities/lockFreeStack.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_UTILITIES_LOCKFREESTACK_HPP
#define SHARE_UTILITIES_LOCKFREESTACK_HPP
+#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -34,11 +35,14 @@
// a result, there is no allocation involved in adding objects to the stack
// or removing them from the stack.
//
-// To be used in a LockFreeStack of objects of type T, an object of
-// type T must have a list entry member of type T* volatile, with an
-// non-member accessor function returning a pointer to that member. A
-// LockFreeStack is associated with the class of its elements and an
-// entry member from that class.
+// To be used in a LockFreeStack of objects of type T, an object of type T
+// must have a list entry member. A list entry member is a data member whose
+// type is either (1) Atomic<T*>, or (2) T* volatile. There must be a
+// non-member or static member function returning a pointer to that member,
+// which is used to provide access to it by a LockFreeStack. A LockFreeStack
+// is associated with the class of its elements and an entry member from that
+// class by being specialized on the element class and a pointer to the
+// function for accessing that entry member.
//
// An object can be in multiple stacks at the same time, so long as
// each stack uses a different entry member. That is, the class of the
@@ -52,12 +56,12 @@
//
// \tparam T is the class of the elements in the stack.
//
-// \tparam next_ptr is a function pointer. Applying this function to
+// \tparam next_accessor is a function pointer. Applying this function to
// an object of type T must return a pointer to the list entry member
// of the object associated with the LockFreeStack type.
-template
+template
class LockFreeStack {
- T* volatile _top;
+ Atomic<T*> _top;
void prepend_impl(T* first, T* last) {
T* cur = top();
@@ -65,12 +69,21 @@ class LockFreeStack {
do {
old = cur;
set_next(*last, cur);
- cur = AtomicAccess::cmpxchg(&_top, cur, first);
+ cur = _top.compare_exchange(cur, first);
} while (old != cur);
}
NONCOPYABLE(LockFreeStack);
+ template
+ static constexpr void use_atomic_access_impl(NextAccessor) {
+ static_assert(DependentAlwaysFalse, "Invalid next accessor");
+ }
+ static constexpr bool use_atomic_access_impl(T* volatile* (*)(T&)) { return true; }
+ static constexpr bool use_atomic_access_impl(Atomic<T*>* (*)(T&)) { return false; }
+
+ static constexpr bool use_atomic_access = use_atomic_access_impl(next_accessor);
+
public:
LockFreeStack() : _top(nullptr) {}
~LockFreeStack() { assert(empty(), "stack not empty"); }
@@ -89,7 +102,7 @@ public:
new_top = next(*result);
}
// CAS even on empty pop, for consistent membar behavior.
- result = AtomicAccess::cmpxchg(&_top, result, new_top);
+ result = _top.compare_exchange(result, new_top);
} while (result != old);
if (result != nullptr) {
set_next(*result, nullptr);
@@ -101,7 +114,7 @@ public:
// list of elements. Acts as a full memory barrier.
// postcondition: empty()
T* pop_all() {
- return AtomicAccess::xchg(&_top, (T*)nullptr);
+ return _top.exchange(nullptr);
}
// Atomically adds value to the top of this stack. Acts as a full
@@ -143,9 +156,9 @@ public:
// Return true if the stack is empty.
bool empty() const { return top() == nullptr; }
- // Return the most recently pushed element, or nullptr if the stack is empty.
+ // Return the most recently pushed element, or null if the stack is empty.
// The returned element is not removed from the stack.
- T* top() const { return AtomicAccess::load(&_top); }
+ T* top() const { return _top.load_relaxed(); }
// Return the number of objects in the stack. There must be no concurrent
// pops while the length is being determined.
@@ -160,7 +173,11 @@ public:
// Return the entry following value in the list used by the
// specialized LockFreeStack class.
static T* next(const T& value) {
- return AtomicAccess::load(next_ptr(const_cast(value)));
+ if constexpr (use_atomic_access) {
+ return AtomicAccess::load(next_accessor(const_cast<T&>(value)));
+ } else {
+ return next_accessor(const_cast<T&>(value))->load_relaxed();
+ }
}
// Set the entry following value to new_next in the list used by the
@@ -168,7 +185,11 @@ public:
// if value is in an instance of this specialization of LockFreeStack,
// there must be no concurrent push or pop operations on that stack.
static void set_next(T& value, T* new_next) {
- AtomicAccess::store(next_ptr(value), new_next);
+ if constexpr (use_atomic_access) {
+ AtomicAccess::store(next_accessor(value), new_next);
+ } else {
+ next_accessor(value)->store_relaxed(new_next);
+ }
}
};
diff --git a/src/hotspot/share/utilities/nonblockingQueue.hpp b/src/hotspot/share/utilities/nonblockingQueue.hpp
deleted file mode 100644
index 1b7e4b8bac4..00000000000
--- a/src/hotspot/share/utilities/nonblockingQueue.hpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_UTILITIES_NONBLOCKINGQUEUE_HPP
-#define SHARE_UTILITIES_NONBLOCKINGQUEUE_HPP
-
-#include "memory/padded.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/pair.hpp"
-
-// The NonblockingQueue template provides a non-blocking FIFO.
-// It has inner padding of one cache line between its two internal pointers.
-//
-// The queue is internally represented by a linked list of elements, with
-// the link to the next element provided by a member of each element.
-// Access to this member is provided by the next_ptr function.
-//
-// The queue has a special pseudo-element that marks the end of the list.
-// Each queue has its own unique special element. A pointer to this element
-// can be recognized using the is_end() function. Such a pointer must never
-// be dereferenced. This end marker is the value of the next member of the
-// last element in the queue, and possibly other elements while modifying
-// the queue.
-//
-// A queue may temporarily appear to be empty even though elements have been
-// added and not removed. For example, after running the following program,
-// the value of r may be null.
-//
-// thread1: q.push(a); r = q.pop();
-// thread2: q.push(b);
-//
-// This can occur if the push of b started before the push of a, but didn't
-// complete until after the pop.
-//
-// \tparam T is the class of the elements in the queue.
-//
-// \tparam next_ptr is a function pointer. Applying this function to
-// an object of type T must return a pointer to the list entry member
-// of the object associated with the NonblockingQueue type.
-template
-class NonblockingQueue {
- T* volatile _head;
- // Padding of one cache line to avoid false sharing.
- DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, sizeof(T*));
- T* volatile _tail;
-
- NONCOPYABLE(NonblockingQueue);
-
- // Return the entry following node in the list used by the
- // specialized NonblockingQueue class.
- static inline T* next(const T& node);
-
- // Set the entry following node to new_next in the list used by the
- // specialized NonblockingQueue class. Not thread-safe, as it cannot
- // concurrently run with push or try_pop operations that modify this
- // node.
- static inline void set_next(T& node, T* new_next);
-
- // A unique pseudo-object pointer associated with this specific queue.
- // The resulting pointer must not be dereferenced.
- inline T* end_marker() const;
-
-public:
- inline NonblockingQueue();
- inline ~NonblockingQueue() NOT_DEBUG(= default);
-
- // Return true if the queue is empty.
- // Not thread-safe. There must be no concurrent modification while the
- // queue is being tested.
- inline bool empty() const;
-
- // Return the number of objects in the queue.
- // Not thread-safe. There must be no concurrent modification while the
- // length is being determined.
- inline size_t length() const;
-
- // Thread-safe add the object to the end of the queue.
- // Subject to ABA behavior; callers must ensure usage is safe.
- inline void push(T& node) { append(node, node); }
-
- // Thread-safe add the objects from first to last to the end of the queue.
- // Subject to ABA behavior; callers must ensure usage is safe.
- inline void append(T& first, T& last);
-
- // Thread-safe attempt to remove and return the first object in the queue.
- // Returns true if successful. If successful then *node_ptr is the former
- // first object, or null if the queue was empty. If unsuccessful, because
- // of contention with a concurrent modification, then returns false with
- // the value of *node_ptr unspecified. Subject to ABA behavior; callers
- // must ensure usage is safe.
- inline bool try_pop(T** node_ptr);
-
- // Thread-safe remove and return the first object in the queue, or null
- // if the queue was empty. This just iterates on try_pop() until it
- // succeeds, returning the (possibly null) element obtained from that.
- // Subject to ABA behavior; callers must ensure usage is safe.
- inline T* pop();
-
- // Take all the objects from the queue, leaving the queue empty.
- // Not thread-safe. There must be no concurrent operations.
- // Returns a pair of pointers to the current queue.
- inline Pair take_all();
-
- // Iteration support is provided by first() and is_end(). The queue must
- // not be modified while iterating over its elements.
-
- // Return the first object in the queue, or an end marker (a pointer p for
- // which is_end(p) is true) if the queue is empty.
- inline T* first() const;
-
- // Test whether entry is an end marker for this queue.
- inline bool is_end(const T* entry) const;
-};
-
-#endif // SHARE_UTILITIES_NONBLOCKINGQUEUE_HPP
diff --git a/src/hotspot/share/utilities/nonblockingQueue.inline.hpp b/src/hotspot/share/utilities/nonblockingQueue.inline.hpp
deleted file mode 100644
index d805eedb7a4..00000000000
--- a/src/hotspot/share/utilities/nonblockingQueue.inline.hpp
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_UTILITIES_NONBLOCKINGQUEUE_INLINE_HPP
-#define SHARE_UTILITIES_NONBLOCKINGQUEUE_INLINE_HPP
-
-#include "utilities/nonblockingQueue.hpp"
-
-#include "runtime/atomicAccess.hpp"
-
-template
-T* NonblockingQueue::next(const T& node) {
- return AtomicAccess::load(next_ptr(const_cast(node)));
-}
-
-template
-void NonblockingQueue::set_next(T& node, T* new_next) {
- AtomicAccess::store(next_ptr(node), new_next);
-}
-
-template
-NonblockingQueue::NonblockingQueue() : _head(nullptr), _tail(nullptr) {}
-
-#ifdef ASSERT
-template
-NonblockingQueue::~NonblockingQueue() {
- assert(_head == nullptr, "precondition");
- assert(_tail == nullptr, "precondition");
-}
-#endif
-
-// The end_marker must be uniquely associated with the specific queue, in
-// case queue elements can make their way through multiple queues. A
-// pointer to the queue itself (after casting) satisfies that requirement.
-template
-T* NonblockingQueue::end_marker() const {
- return const_cast(reinterpret_cast(this));
-}
-
-template
-T* NonblockingQueue::first() const {
- T* head = AtomicAccess::load(&_head);
- return head == nullptr ? end_marker() : head;
-}
-
-template
-bool NonblockingQueue::is_end(const T* entry) const {
- return entry == end_marker();
-}
-
-template
-bool NonblockingQueue::empty() const {
- return AtomicAccess::load(&_head) == nullptr;
-}
-
-template
-size_t NonblockingQueue::length() const {
- size_t result = 0;
- for (T* cur = first(); !is_end(cur); cur = next(*cur)) {
- ++result;
- }
- return result;
-}
-
-// An append operation atomically exchanges the new tail with the queue tail.
-// It then sets the "next" value of the old tail to the head of the list being
-// appended. If the old tail is null then the queue was empty, then the
-// head of the list being appended is instead stored in the queue head.
-//
-// This means there is a period between the exchange and the old tail update
-// where the queue sequence is split into two parts, the list from the queue
-// head to the old tail, and the list being appended. If there are concurrent
-// push/append operations, each may introduce another such segment. But they
-// all eventually get resolved by their respective updates of their old tail's
-// "next" value. This also means that try_pop operation must handle an object
-// differently depending on its "next" value.
-//
-// A push operation is just a degenerate append, where the object being pushed
-// is both the head and the tail of the list being appended.
-template
-void NonblockingQueue::append(T& first, T& last) {
- assert(next(last) == nullptr, "precondition");
- // Make last the new end of the queue. Any further push/appends will
- // extend after last. We will try to extend from the previous end of
- // queue.
- set_next(last, end_marker());
- T* old_tail = AtomicAccess::xchg(&_tail, &last);
- if (old_tail == nullptr) {
- // If old_tail is null then the queue was empty, and _head must also be
- // null. The correctness of this assertion depends on try_pop clearing
- // first _head then _tail when taking the last entry.
- assert(AtomicAccess::load(&_head) == nullptr, "invariant");
- // Fall through to common update of _head.
- } else if (is_end(AtomicAccess::cmpxchg(next_ptr(*old_tail), end_marker(), &first))) {
- // Successfully extended the queue list from old_tail to first. No
- // other push/append could have competed with us, because we claimed
- // old_tail for extension. We won any races with try_pop by changing
- // away from end-marker. So we're done.
- //
- // Note that ABA is possible here. A concurrent try_pop could take
- // old_tail before our update of old_tail's next_ptr, old_tail gets
- // recycled and re-added to the end of this queue, and then we
- // successfully cmpxchg, making the list in _tail circular. Callers
- // must ensure this can't happen.
- return;
- } else {
- // A concurrent try_pop has claimed old_tail, so it is no longer in the
- // list. The queue was logically empty. _head is either null or
- // old_tail, depending on how far try_pop operations have progressed.
- DEBUG_ONLY(T* old_head = AtomicAccess::load(&_head);)
- assert((old_head == nullptr) || (old_head == old_tail), "invariant");
- // Fall through to common update of _head.
- }
- // The queue was empty, and first should become the new _head. The queue
- // will appear to be empty to any further try_pops until done.
- AtomicAccess::store(&_head, &first);
-}
-
-template
-bool NonblockingQueue::try_pop(T** node_ptr) {
- // We only need memory_order_consume. Upgrade it to "load_acquire"
- // as the memory_order_consume API is not ready for use yet.
- T* old_head = AtomicAccess::load_acquire(&_head);
- if (old_head == nullptr) {
- *node_ptr = nullptr;
- return true; // Queue is empty.
- }
-
- T* next_node = AtomicAccess::load_acquire(next_ptr(*old_head));
- if (!is_end(next_node)) {
- // [Clause 1]
- // There are several cases for next_node.
- // (1) next_node is the extension of the queue's list.
- // (2) next_node is null, because a competing try_pop took old_head.
- // (3) next_node is the extension of some unrelated list, because a
- // competing try_pop took old_head and put it in some other list.
- //
- // Attempt to advance the list, replacing old_head with next_node in
- // _head. The success or failure of that attempt, along with the value
- // of next_node, are used to partially determine which case we're in and
- // how to proceed. In particular, advancement will fail for case (3).
- if (old_head != AtomicAccess::cmpxchg(&_head, old_head, next_node)) {
- // [Clause 1a]
- // The cmpxchg to advance the list failed; a concurrent try_pop won
- // the race and claimed old_head. This can happen for any of the
- // next_node cases.
- return false;
- } else if (next_node == nullptr) {
- // [Clause 1b]
- // The cmpxchg to advance the list succeeded, but a concurrent try_pop
- // has already claimed old_head (see [Clause 2] - old_head was the last
- // entry in the list) by nulling old_head's next field. The advance set
- // _head to null, "helping" the competing try_pop. _head will remain
- // nullptr until a subsequent push/append. This is a lost race, and we
- // report it as such for consistency, though we could report the queue
- // was empty. We don't attempt to further help [Clause 2] by also
- // trying to set _tail to nullptr, as that would just ensure that one or
- // the other cmpxchg is a wasted failure.
- return false;
- } else {
- // [Clause 1c]
- // Successfully advanced the list and claimed old_head. next_node was
- // in the extension of the queue's list. Return old_head after
- // unlinking it from next_node.
- set_next(*old_head, nullptr);
- *node_ptr = old_head;
- return true;
- }
-
- } else if (is_end(AtomicAccess::cmpxchg(next_ptr(*old_head), next_node, (T*)nullptr))) {
- // [Clause 2]
- // Old_head was the last entry and we've claimed it by setting its next
- // value to null. However, this leaves the queue in disarray. Fix up
- // the queue, possibly in conjunction with other concurrent operations.
- // Any further try_pops will consider the queue empty until a
- // push/append completes by installing a new head.
-
- // The order of the two cmpxchgs doesn't matter algorithmically, but
- // dealing with _head first gives a stronger invariant in append, and is
- // also consistent with [Clause 1b].
-
- // Attempt to change the queue head from old_head to null. Failure of
- // the cmpxchg indicates a concurrent operation updated _head first. That
- // could be either a push/append or a try_pop in [Clause 1b].
- AtomicAccess::cmpxchg(&_head, old_head, (T*)nullptr);
-
- // Attempt to change the queue tail from old_head to null. Failure of
- // the cmpxchg indicates that a concurrent push/append updated _tail first.
- // That operation will eventually recognize the old tail (our old_head) is
- // no longer in the list and update _head from the list being appended.
- AtomicAccess::cmpxchg(&_tail, old_head, (T*)nullptr);
-
- // The queue has been restored to order, and we can return old_head.
- *node_ptr = old_head;
- return true;
-
- } else {
- // [Clause 3]
- // Old_head was the last entry in the list, but either a concurrent
- // try_pop claimed it first or a concurrent push/append extended the
- // list from it. Either way, we lost the race to claim it.
- return false;
- }
-}
-
-template
-T* NonblockingQueue::pop() {
- T* result = nullptr;
- // Typically try_pop() will succeed without retrying many times, thus we
- // omit SpinPause in the loop body. SpinPause or yield may be worthwhile
- // in rare, highly contended cases, and client code could implement such
- // with try_pop().
- while (!try_pop(&result)) {}
- return result;
-}
-
-template
-Pair NonblockingQueue::take_all() {
- T* tail = AtomicAccess::load(&_tail);
- if (tail != nullptr) set_next(*tail, nullptr); // Clear end marker.
- Pair result(AtomicAccess::load(&_head), tail);
- AtomicAccess::store(&_head, (T*)nullptr);
- AtomicAccess::store(&_tail, (T*)nullptr);
- return result;
-}
-
-#endif // SHARE_UTILITIES_NONBLOCKINGQUEUE_INLINE_HPP
diff --git a/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java b/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java
index aa618766b38..3e3f637cd4c 100644
--- a/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java
+++ b/src/java.base/linux/classes/jdk/internal/platform/cgroupv2/CgroupV2Subsystem.java
@@ -156,22 +156,39 @@ public class CgroupV2Subsystem implements CgroupSubsystem {
@Override
public long getCpuShares() {
long sharesRaw = getLongVal("cpu.weight");
- if (sharesRaw == 100 || sharesRaw <= 0) {
+ // cg v2 value must be in range [1,10000]
+ if (sharesRaw == 100 || sharesRaw <= 0 || sharesRaw > 10000) {
return CgroupSubsystem.LONG_RETVAL_UNLIMITED;
}
int shares = (int)sharesRaw;
// CPU shares (OCI) value needs to get translated into
// a proper Cgroups v2 value. See:
- // https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller
+ // https://github.com/containers/crun/blob/1.24/crun.1.md#cpu-controller
//
// Use the inverse of (x == OCI value, y == cgroupsv2 value):
- // ((262142 * y - 1)/9999) + 2 = x
+ // y = 10^(log2(x)^2/612 + 125/612 * log2(x) - 7.0/34.0)
//
- int x = 262142 * shares - 1;
- double frac = x/9999.0;
- x = ((int)frac) + 2;
+ // By re-arranging it to the standard quadratic form:
+ // log2(x)^2 + 125 * log2(x) - (126 + 612 * log_10(y)) = 0
+ //
+ // Therefore, log2(x) = (-125 + sqrt( 125^2 - 4 * (-(126 + 612 * log_10(y)))))/2
+ //
+    // As a result we have the inverse (we can discount subtraction of the
+ // square root value since those values result in very small numbers and the
+ // cpu shares values - OCI - are in range [2-262144])
+ //
+ // x = 2^((-125 + sqrt(16129 + 2448* log10(y)))/2)
+ //
+ double logMultiplicand = Math.log10(shares);
+ double discriminant = 16129 + 2448 * logMultiplicand;
+ double squareRoot = Math.sqrt(discriminant);
+ double exponent = (-125 + squareRoot)/2;
+ double scaledValue = Math.pow(2, exponent);
+
+ int x = (int)scaledValue;
if ( x <= PER_CPU_SHARES ) {
- return PER_CPU_SHARES; // mimic cgroups v1
+ // Return the back-mapped value.
+ return x;
}
int f = x/PER_CPU_SHARES;
int lower_multiple = f * PER_CPU_SHARES;
diff --git a/src/java.base/share/classes/java/lang/Character.java b/src/java.base/share/classes/java/lang/Character.java
index 72ff33651f9..d866202909c 100644
--- a/src/java.base/share/classes/java/lang/Character.java
+++ b/src/java.base/share/classes/java/lang/Character.java
@@ -63,7 +63,7 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME;
* from the Unicode Consortium at
* http://www.unicode.org.
*
- * Character information is based on the Unicode Standard, version 16.0.
+ * Character information is based on the Unicode Standard, version 17.0.
*
* The Java platform has supported different versions of the Unicode
* Standard over time. Upgrades to newer versions of the Unicode Standard
@@ -75,6 +75,8 @@ import static java.lang.constant.ConstantDescs.DEFAULT_NAME;
*
Unicode version |
*
*
+ * | Java SE 26 |
+ * Unicode 17.0 |
* | Java SE 24 |
* Unicode 16.0 |
* | Java SE 22 |
@@ -745,7 +747,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
* It should be adjusted whenever the Unicode Character Database
* is upgraded.
*/
- private static final int NUM_ENTITIES = 782;
+ private static final int NUM_ENTITIES = 804;
private static Map map = HashMap.newHashMap(NUM_ENTITIES);
/**
@@ -3715,6 +3717,85 @@ class Character implements java.io.Serializable, Comparable, Constabl
"OL ONAL",
"OLONAL");
+ /**
+ * Constant for the "Sidetic" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock SIDETIC =
+ new UnicodeBlock("SIDETIC");
+
+ /**
+ * Constant for the "Sharada Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock SHARADA_SUPPLEMENT =
+ new UnicodeBlock("SHARADA_SUPPLEMENT",
+ "SHARADA SUPPLEMENT",
+ "SHARADASUPPLEMENT");
+
+ /**
+ * Constant for the "Tolong Siki" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TOLONG_SIKI =
+ new UnicodeBlock("TOLONG_SIKI",
+ "TOLONG SIKI",
+ "TOLONGSIKI");
+
+ /**
+ * Constant for the "Beria Erfe" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock BERIA_ERFE =
+ new UnicodeBlock("BERIA_ERFE",
+ "BERIA ERFE",
+ "BERIAERFE");
+
+ /**
+ * Constant for the "Tangut Components Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TANGUT_COMPONENTS_SUPPLEMENT =
+ new UnicodeBlock("TANGUT_COMPONENTS_SUPPLEMENT",
+ "TANGUT COMPONENTS SUPPLEMENT",
+ "TANGUTCOMPONENTSSUPPLEMENT");
+
+ /**
+ * Constant for the "Miscellaneous Symbols Supplement" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock MISCELLANEOUS_SYMBOLS_SUPPLEMENT =
+ new UnicodeBlock("MISCELLANEOUS_SYMBOLS_SUPPLEMENT",
+ "MISCELLANEOUS SYMBOLS SUPPLEMENT",
+ "MISCELLANEOUSSYMBOLSSUPPLEMENT");
+
+ /**
+ * Constant for the "Tai Yo" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock TAI_YO =
+ new UnicodeBlock("TAI_YO",
+ "TAI YO",
+ "TAIYO");
+
+ /**
+ * Constant for the "CJK Unified Ideographs Extension J" Unicode
+ * character block.
+ * @since 26
+ */
+ public static final UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J =
+ new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J",
+ "CJK UNIFIED IDEOGRAPHS EXTENSION J",
+ "CJKUNIFIEDIDEOGRAPHSEXTENSIONJ");
+
+
private static final int[] blockStarts = {
0x0000, // 0000..007F; Basic Latin
0x0080, // 0080..00FF; Latin-1 Supplement
@@ -3916,7 +3997,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x108E0, // 108E0..108FF; Hatran
0x10900, // 10900..1091F; Phoenician
0x10920, // 10920..1093F; Lydian
- 0x10940, // unassigned
+ 0x10940, // 10940..1095F; Sidetic
+ 0x10960, // unassigned
0x10980, // 10980..1099F; Meroitic Hieroglyphs
0x109A0, // 109A0..109FF; Meroitic Cursive
0x10A00, // 10A00..10A5F; Kharoshthi
@@ -3977,14 +4059,16 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x11AB0, // 11AB0..11ABF; Unified Canadian Aboriginal Syllabics Extended-A
0x11AC0, // 11AC0..11AFF; Pau Cin Hau
0x11B00, // 11B00..11B5F; Devanagari Extended-A
- 0x11B60, // unassigned
+ 0x11B60, // 11B60..11B7F; Sharada Supplement
+ 0x11B80, // unassigned
0x11BC0, // 11BC0..11BFF; Sunuwar
0x11C00, // 11C00..11C6F; Bhaiksuki
0x11C70, // 11C70..11CBF; Marchen
0x11CC0, // unassigned
0x11D00, // 11D00..11D5F; Masaram Gondi
0x11D60, // 11D60..11DAF; Gunjala Gondi
- 0x11DB0, // unassigned
+ 0x11DB0, // 11DB0..11DEF; Tolong Siki
+ 0x11DF0, // unassigned
0x11EE0, // 11EE0..11EFF; Makasar
0x11F00, // 11F00..11F5F; Kawi
0x11F60, // unassigned
@@ -4011,7 +4095,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x16D40, // 16D40..16D7F; Kirat Rai
0x16D80, // unassigned
0x16E40, // 16E40..16E9F; Medefaidrin
- 0x16EA0, // unassigned
+ 0x16EA0, // 16EA0..16EDF; Beria Erfe
+ 0x16EE0, // unassigned
0x16F00, // 16F00..16F9F; Miao
0x16FA0, // unassigned
0x16FE0, // 16FE0..16FFF; Ideographic Symbols and Punctuation
@@ -4019,7 +4104,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x18800, // 18800..18AFF; Tangut Components
0x18B00, // 18B00..18CFF; Khitan Small Script
0x18D00, // 18D00..18D7F; Tangut Supplement
- 0x18D80, // unassigned
+ 0x18D80, // 18D80..18DFF; Tangut Components Supplement
+ 0x18E00, // unassigned
0x1AFF0, // 1AFF0..1AFFF; Kana Extended-B
0x1B000, // 1B000..1B0FF; Kana Supplement
0x1B100, // 1B100..1B12F; Kana Extended-A
@@ -4030,7 +4116,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1BCA0, // 1BCA0..1BCAF; Shorthand Format Controls
0x1BCB0, // unassigned
0x1CC00, // 1CC00..1CEBF; Symbols for Legacy Computing Supplement
- 0x1CEC0, // unassigned
+ 0x1CEC0, // 1CEC0..1CEFF; Miscellaneous Symbols Supplement
0x1CF00, // 1CF00..1CFCF; Znamenny Musical Notation
0x1CFD0, // unassigned
0x1D000, // 1D000..1D0FF; Byzantine Musical Symbols
@@ -4058,6 +4144,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1E500, // unassigned
0x1E5D0, // 1E5D0..1E5FF; Ol Onal
0x1E600, // unassigned
+ 0x1E6C0, // 1E6C0..1E6FF; Tai Yo
+ 0x1E700, // unassigned
0x1E7E0, // 1E7E0..1E7FF; Ethiopic Extended-B
0x1E800, // 1E800..1E8DF; Mende Kikakui
0x1E8E0, // unassigned
@@ -4098,7 +4186,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x2FA20, // unassigned
0x30000, // 30000..3134F; CJK Unified Ideographs Extension G
0x31350, // 31350..323AF; CJK Unified Ideographs Extension H
- 0x323B0, // unassigned
+ 0x323B0, // 323B0..3347F; CJK Unified Ideographs Extension J
+ 0x33480, // unassigned
0xE0000, // E0000..E007F; Tags
0xE0080, // unassigned
0xE0100, // E0100..E01EF; Variation Selectors Supplement
@@ -4308,6 +4397,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
HATRAN,
PHOENICIAN,
LYDIAN,
+ SIDETIC,
null,
MEROITIC_HIEROGLYPHS,
MEROITIC_CURSIVE,
@@ -4369,6 +4459,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED_A,
PAU_CIN_HAU,
DEVANAGARI_EXTENDED_A,
+ SHARADA_SUPPLEMENT,
null,
SUNUWAR,
BHAIKSUKI,
@@ -4376,6 +4467,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
null,
MASARAM_GONDI,
GUNJALA_GONDI,
+ TOLONG_SIKI,
null,
MAKASAR,
KAWI,
@@ -4403,6 +4495,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
KIRAT_RAI,
null,
MEDEFAIDRIN,
+ BERIA_ERFE,
null,
MIAO,
null,
@@ -4411,6 +4504,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
TANGUT_COMPONENTS,
KHITAN_SMALL_SCRIPT,
TANGUT_SUPPLEMENT,
+ TANGUT_COMPONENTS_SUPPLEMENT,
null,
KANA_EXTENDED_B,
KANA_SUPPLEMENT,
@@ -4422,7 +4516,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
SHORTHAND_FORMAT_CONTROLS,
null,
SYMBOLS_FOR_LEGACY_COMPUTING_SUPPLEMENT,
- null,
+ MISCELLANEOUS_SYMBOLS_SUPPLEMENT,
ZNAMENNY_MUSICAL_NOTATION,
null,
BYZANTINE_MUSICAL_SYMBOLS,
@@ -4450,6 +4544,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
null,
OL_ONAL,
null,
+ TAI_YO,
+ null,
ETHIOPIC_EXTENDED_B,
MENDE_KIKAKUI,
null,
@@ -4490,6 +4586,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
null,
CJK_UNIFIED_IDEOGRAPHS_EXTENSION_G,
CJK_UNIFIED_IDEOGRAPHS_EXTENSION_H,
+ CJK_UNIFIED_IDEOGRAPHS_EXTENSION_J,
null,
TAGS,
null,
@@ -5547,6 +5644,30 @@ class Character implements java.io.Serializable, Comparable, Constabl
*/
OL_ONAL,
+ /**
+ * Unicode script "Sidetic".
+ * @since 26
+ */
+ SIDETIC,
+
+ /**
+ * Unicode script "Tolong Siki".
+ * @since 26
+ */
+ TOLONG_SIKI,
+
+ /**
+ * Unicode script "Beria Erfe".
+ * @since 26
+ */
+ BERIA_ERFE,
+
+ /**
+ * Unicode script "Tai Yo".
+ * @since 26
+ */
+ TAI_YO,
+
/**
* Unicode script "Unknown".
*/
@@ -5648,9 +5769,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x085F, // 085F ; UNKNOWN
0x0860, // 0860..086A; SYRIAC
0x086B, // 086B..086F; UNKNOWN
- 0x0870, // 0870..088E; ARABIC
- 0x088F, // 088F ; UNKNOWN
- 0x0890, // 0890..0891; ARABIC
+ 0x0870, // 0870..0891; ARABIC
0x0892, // 0892..0896; UNKNOWN
0x0897, // 0897..08E1; ARABIC
0x08E2, // 08E2 ; COMMON
@@ -5825,8 +5944,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x0C55, // 0C55..0C56; TELUGU
0x0C57, // 0C57 ; UNKNOWN
0x0C58, // 0C58..0C5A; TELUGU
- 0x0C5B, // 0C5B..0C5C; UNKNOWN
- 0x0C5D, // 0C5D ; TELUGU
+ 0x0C5B, // 0C5B ; UNKNOWN
+ 0x0C5C, // 0C5C..0C5D; TELUGU
0x0C5E, // 0C5E..0C5F; UNKNOWN
0x0C60, // 0C60..0C63; TELUGU
0x0C64, // 0C64..0C65; UNKNOWN
@@ -5850,8 +5969,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x0CCA, // 0CCA..0CCD; KANNADA
0x0CCE, // 0CCE..0CD4; UNKNOWN
0x0CD5, // 0CD5..0CD6; KANNADA
- 0x0CD7, // 0CD7..0CDC; UNKNOWN
- 0x0CDD, // 0CDD..0CDE; KANNADA
+ 0x0CD7, // 0CD7..0CDB; UNKNOWN
+ 0x0CDC, // 0CDC..0CDE; KANNADA
0x0CDF, // 0CDF ; UNKNOWN
0x0CE0, // 0CE0..0CE3; KANNADA
0x0CE4, // 0CE4..0CE5; UNKNOWN
@@ -6062,8 +6181,10 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1A9A, // 1A9A..1A9F; UNKNOWN
0x1AA0, // 1AA0..1AAD; TAI_THAM
0x1AAE, // 1AAE..1AAF; UNKNOWN
- 0x1AB0, // 1AB0..1ACE; INHERITED
- 0x1ACF, // 1ACF..1AFF; UNKNOWN
+ 0x1AB0, // 1AB0..1ADD; INHERITED
+ 0x1ADE, // 1ADE..1ADF; UNKNOWN
+ 0x1AE0, // 1AE0..1AEB; INHERITED
+ 0x1AEC, // 1AEC..1AFF; UNKNOWN
0x1B00, // 1B00..1B4C; BALINESE
0x1B4D, // 1B4D ; UNKNOWN
0x1B4E, // 1B4E..1B7F; BALINESE
@@ -6155,8 +6276,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x208F, // 208F ; UNKNOWN
0x2090, // 2090..209C; LATIN
0x209D, // 209D..209F; UNKNOWN
- 0x20A0, // 20A0..20C0; COMMON
- 0x20C1, // 20C1..20CF; UNKNOWN
+ 0x20A0, // 20A0..20C1; COMMON
+ 0x20C2, // 20C2..20CF; UNKNOWN
0x20D0, // 20D0..20F0; INHERITED
0x20F1, // 20F1..20FF; UNKNOWN
0x2100, // 2100..2125; COMMON
@@ -6179,9 +6300,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x2800, // 2800..28FF; BRAILLE
0x2900, // 2900..2B73; COMMON
0x2B74, // 2B74..2B75; UNKNOWN
- 0x2B76, // 2B76..2B95; COMMON
- 0x2B96, // 2B96 ; UNKNOWN
- 0x2B97, // 2B97..2BFF; COMMON
+ 0x2B76, // 2B76..2BFF; COMMON
0x2C00, // 2C00..2C5F; GLAGOLITIC
0x2C60, // 2C60..2C7F; LATIN
0x2C80, // 2C80..2CF3; COPTIC
@@ -6282,15 +6401,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
0xA700, // A700..A721; COMMON
0xA722, // A722..A787; LATIN
0xA788, // A788..A78A; COMMON
- 0xA78B, // A78B..A7CD; LATIN
- 0xA7CE, // A7CE..A7CF; UNKNOWN
- 0xA7D0, // A7D0..A7D1; LATIN
- 0xA7D2, // A7D2 ; UNKNOWN
- 0xA7D3, // A7D3 ; LATIN
- 0xA7D4, // A7D4 ; UNKNOWN
- 0xA7D5, // A7D5..A7DC; LATIN
- 0xA7DD, // A7DD..A7F1; UNKNOWN
- 0xA7F2, // A7F2..A7FF; LATIN
+ 0xA78B, // A78B..A7DC; LATIN
+ 0xA7DD, // A7DD..A7F0; UNKNOWN
+ 0xA7F1, // A7F1..A7FF; LATIN
0xA800, // A800..A82C; SYLOTI_NAGRI
0xA82D, // A82D..A82F; UNKNOWN
0xA830, // A830..A839; COMMON
@@ -6378,15 +6491,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
0xFB43, // FB43..FB44; HEBREW
0xFB45, // FB45 ; UNKNOWN
0xFB46, // FB46..FB4F; HEBREW
- 0xFB50, // FB50..FBC2; ARABIC
- 0xFBC3, // FBC3..FBD2; UNKNOWN
- 0xFBD3, // FBD3..FD3D; ARABIC
+ 0xFB50, // FB50..FD3D; ARABIC
0xFD3E, // FD3E..FD3F; COMMON
- 0xFD40, // FD40..FD8F; ARABIC
- 0xFD90, // FD90..FD91; UNKNOWN
- 0xFD92, // FD92..FDC7; ARABIC
- 0xFDC8, // FDC8..FDCE; UNKNOWN
- 0xFDCF, // FDCF ; ARABIC
+ 0xFD40, // FD40..FDCF; ARABIC
0xFDD0, // FDD0..FDEF; UNKNOWN
0xFDF0, // FDF0..FDFF; ARABIC
0xFE00, // FE00..FE0F; INHERITED
@@ -6555,7 +6662,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x10920, // 10920..10939; LYDIAN
0x1093A, // 1093A..1093E; UNKNOWN
0x1093F, // 1093F ; LYDIAN
- 0x10940, // 10940..1097F; UNKNOWN
+ 0x10940, // 10940..10959; SIDETIC
+ 0x1095A, // 1095A..1097F; UNKNOWN
0x10980, // 10980..1099F; MEROITIC_HIEROGLYPHS
0x109A0, // 109A0..109B7; MEROITIC_CURSIVE
0x109B8, // 109B8..109BB; UNKNOWN
@@ -6625,9 +6733,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x10EAE, // 10EAE..10EAF; UNKNOWN
0x10EB0, // 10EB0..10EB1; YEZIDI
0x10EB2, // 10EB2..10EC1; UNKNOWN
- 0x10EC2, // 10EC2..10EC4; ARABIC
- 0x10EC5, // 10EC5..10EFB; UNKNOWN
- 0x10EFC, // 10EFC..10EFF; ARABIC
+ 0x10EC2, // 10EC2..10EC7; ARABIC
+ 0x10EC8, // 10EC8..10ECF; UNKNOWN
+ 0x10ED0, // 10ED0..10ED8; ARABIC
+ 0x10ED9, // 10ED9..10EF9; UNKNOWN
+ 0x10EFA, // 10EFA..10EFF; ARABIC
0x10F00, // 10F00..10F27; OLD_SOGDIAN
0x10F28, // 10F28..10F2F; UNKNOWN
0x10F30, // 10F30..10F59; SOGDIAN
@@ -6797,7 +6907,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x11AC0, // 11AC0..11AF8; PAU_CIN_HAU
0x11AF9, // 11AF9..11AFF; UNKNOWN
0x11B00, // 11B00..11B09; DEVANAGARI
- 0x11B0A, // 11B0A..11BBF; UNKNOWN
+ 0x11B0A, // 11B0A..11B5F; UNKNOWN
+ 0x11B60, // 11B60..11B67; SHARADA
+ 0x11B68, // 11B68..11BBF; UNKNOWN
0x11BC0, // 11BC0..11BE1; SUNUWAR
0x11BE2, // 11BE2..11BEF; UNKNOWN
0x11BF0, // 11BF0..11BF9; SUNUWAR
@@ -6841,7 +6953,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x11D93, // 11D93..11D98; GUNJALA_GONDI
0x11D99, // 11D99..11D9F; UNKNOWN
0x11DA0, // 11DA0..11DA9; GUNJALA_GONDI
- 0x11DAA, // 11DAA..11EDF; UNKNOWN
+ 0x11DAA, // 11DAA..11DAF; UNKNOWN
+ 0x11DB0, // 11DB0..11DDB; TOLONG_SIKI
+ 0x11DDC, // 11DDC..11DDF; UNKNOWN
+ 0x11DE0, // 11DE0..11DE9; TOLONG_SIKI
+ 0x11DEA, // 11DEA..11EDF; UNKNOWN
0x11EE0, // 11EE0..11EF8; MAKASAR
0x11EF9, // 11EF9..11EFF; UNKNOWN
0x11F00, // 11F00..11F10; KAWI
@@ -6901,7 +7017,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x16D40, // 16D40..16D79; KIRAT_RAI
0x16D7A, // 16D7A..16E3F; UNKNOWN
0x16E40, // 16E40..16E9A; MEDEFAIDRIN
- 0x16E9B, // 16E9B..16EFF; UNKNOWN
+ 0x16E9B, // 16E9B..16E9F; UNKNOWN
+ 0x16EA0, // 16EA0..16EB8; BERIA_ERFE
+ 0x16EB9, // 16EB9..16EBA; UNKNOWN
+ 0x16EBB, // 16EBB..16ED3; BERIA_ERFE
+ 0x16ED4, // 16ED4..16EFF; UNKNOWN
0x16F00, // 16F00..16F4A; MIAO
0x16F4B, // 16F4B..16F4E; UNKNOWN
0x16F4F, // 16F4F..16F87; MIAO
@@ -6913,16 +7033,16 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x16FE2, // 16FE2..16FE3; HAN
0x16FE4, // 16FE4 ; KHITAN_SMALL_SCRIPT
0x16FE5, // 16FE5..16FEF; UNKNOWN
- 0x16FF0, // 16FF0..16FF1; HAN
- 0x16FF2, // 16FF2..16FFF; UNKNOWN
- 0x17000, // 17000..187F7; TANGUT
- 0x187F8, // 187F8..187FF; UNKNOWN
- 0x18800, // 18800..18AFF; TANGUT
+ 0x16FF0, // 16FF0..16FF6; HAN
+ 0x16FF7, // 16FF7..16FFF; UNKNOWN
+ 0x17000, // 17000..18AFF; TANGUT
0x18B00, // 18B00..18CD5; KHITAN_SMALL_SCRIPT
0x18CD6, // 18CD6..18CFE; UNKNOWN
0x18CFF, // 18CFF ; KHITAN_SMALL_SCRIPT
- 0x18D00, // 18D00..18D08; TANGUT
- 0x18D09, // 18D09..1AFEF; UNKNOWN
+ 0x18D00, // 18D00..18D1E; TANGUT
+ 0x18D1F, // 18D1F..18D7F; UNKNOWN
+ 0x18D80, // 18D80..18DF2; TANGUT
+ 0x18DF3, // 18DF3..1AFEF; UNKNOWN
0x1AFF0, // 1AFF0..1AFF3; KATAKANA
0x1AFF4, // 1AFF4 ; UNKNOWN
0x1AFF5, // 1AFF5..1AFFB; KATAKANA
@@ -6954,10 +7074,14 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1BC9C, // 1BC9C..1BC9F; DUPLOYAN
0x1BCA0, // 1BCA0..1BCA3; COMMON
0x1BCA4, // 1BCA4..1CBFF; UNKNOWN
- 0x1CC00, // 1CC00..1CCF9; COMMON
- 0x1CCFA, // 1CCFA..1CCFF; UNKNOWN
+ 0x1CC00, // 1CC00..1CCFC; COMMON
+ 0x1CCFD, // 1CCFD..1CCFF; UNKNOWN
0x1CD00, // 1CD00..1CEB3; COMMON
- 0x1CEB4, // 1CEB4..1CEFF; UNKNOWN
+ 0x1CEB4, // 1CEB4..1CEB9; UNKNOWN
+ 0x1CEBA, // 1CEBA..1CED0; COMMON
+ 0x1CED1, // 1CED1..1CEDF; UNKNOWN
+ 0x1CEE0, // 1CEE0..1CEF0; COMMON
+ 0x1CEF1, // 1CEF1..1CEFF; UNKNOWN
0x1CF00, // 1CF00..1CF2D; INHERITED
0x1CF2E, // 1CF2E..1CF2F; UNKNOWN
0x1CF30, // 1CF30..1CF46; INHERITED
@@ -7072,7 +7196,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1E5D0, // 1E5D0..1E5FA; OL_ONAL
0x1E5FB, // 1E5FB..1E5FE; UNKNOWN
0x1E5FF, // 1E5FF ; OL_ONAL
- 0x1E600, // 1E600..1E7DF; UNKNOWN
+ 0x1E600, // 1E600..1E6BF; UNKNOWN
+ 0x1E6C0, // 1E6C0..1E6DE; TAI_YO
+ 0x1E6DF, // 1E6DF ; UNKNOWN
+ 0x1E6E0, // 1E6E0..1E6F5; TAI_YO
+ 0x1E6F6, // 1E6F6..1E6FD; UNKNOWN
+ 0x1E6FE, // 1E6FE..1E6FF; TAI_YO
+ 0x1E700, // 1E700..1E7DF; UNKNOWN
0x1E7E0, // 1E7E0..1E7E6; ETHIOPIC
0x1E7E7, // 1E7E7 ; UNKNOWN
0x1E7E8, // 1E7E8..1E7EB; ETHIOPIC
@@ -7189,15 +7319,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1F252, // 1F252..1F25F; UNKNOWN
0x1F260, // 1F260..1F265; COMMON
0x1F266, // 1F266..1F2FF; UNKNOWN
- 0x1F300, // 1F300..1F6D7; COMMON
- 0x1F6D8, // 1F6D8..1F6DB; UNKNOWN
+ 0x1F300, // 1F300..1F6D8; COMMON
+ 0x1F6D9, // 1F6D9..1F6DB; UNKNOWN
0x1F6DC, // 1F6DC..1F6EC; COMMON
0x1F6ED, // 1F6ED..1F6EF; UNKNOWN
0x1F6F0, // 1F6F0..1F6FC; COMMON
0x1F6FD, // 1F6FD..1F6FF; UNKNOWN
- 0x1F700, // 1F700..1F776; COMMON
- 0x1F777, // 1F777..1F77A; UNKNOWN
- 0x1F77B, // 1F77B..1F7D9; COMMON
+ 0x1F700, // 1F700..1F7D9; COMMON
0x1F7DA, // 1F7DA..1F7DF; UNKNOWN
0x1F7E0, // 1F7E0..1F7EB; COMMON
0x1F7EC, // 1F7EC..1F7EF; UNKNOWN
@@ -7216,35 +7344,37 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x1F8B0, // 1F8B0..1F8BB; COMMON
0x1F8BC, // 1F8BC..1F8BF; UNKNOWN
0x1F8C0, // 1F8C0..1F8C1; COMMON
- 0x1F8C2, // 1F8C2..1F8FF; UNKNOWN
- 0x1F900, // 1F900..1FA53; COMMON
- 0x1FA54, // 1FA54..1FA5F; UNKNOWN
+ 0x1F8C2, // 1F8C2..1F8CF; UNKNOWN
+ 0x1F8D0, // 1F8D0..1F8D8; COMMON
+ 0x1F8D9, // 1F8D9..1F8FF; UNKNOWN
+ 0x1F900, // 1F900..1FA57; COMMON
+ 0x1FA58, // 1FA58..1FA5F; UNKNOWN
0x1FA60, // 1FA60..1FA6D; COMMON
0x1FA6E, // 1FA6E..1FA6F; UNKNOWN
0x1FA70, // 1FA70..1FA7C; COMMON
0x1FA7D, // 1FA7D..1FA7F; UNKNOWN
- 0x1FA80, // 1FA80..1FA89; COMMON
- 0x1FA8A, // 1FA8A..1FA8E; UNKNOWN
- 0x1FA8F, // 1FA8F..1FAC6; COMMON
- 0x1FAC7, // 1FAC7..1FACD; UNKNOWN
- 0x1FACE, // 1FACE..1FADC; COMMON
+ 0x1FA80, // 1FA80..1FA8A; COMMON
+ 0x1FA8B, // 1FA8B..1FA8D; UNKNOWN
+ 0x1FA8E, // 1FA8E..1FAC6; COMMON
+ 0x1FAC7, // 1FAC7 ; UNKNOWN
+ 0x1FAC8, // 1FAC8 ; COMMON
+ 0x1FAC9, // 1FAC9..1FACC; UNKNOWN
+ 0x1FACD, // 1FACD..1FADC; COMMON
0x1FADD, // 1FADD..1FADE; UNKNOWN
- 0x1FADF, // 1FADF..1FAE9; COMMON
- 0x1FAEA, // 1FAEA..1FAEF; UNKNOWN
- 0x1FAF0, // 1FAF0..1FAF8; COMMON
+ 0x1FADF, // 1FADF..1FAEA; COMMON
+ 0x1FAEB, // 1FAEB..1FAEE; UNKNOWN
+ 0x1FAEF, // 1FAEF..1FAF8; COMMON
0x1FAF9, // 1FAF9..1FAFF; UNKNOWN
0x1FB00, // 1FB00..1FB92; COMMON
0x1FB93, // 1FB93 ; UNKNOWN
- 0x1FB94, // 1FB94..1FBF9; COMMON
- 0x1FBFA, // 1FBFA..1FFFF; UNKNOWN
+ 0x1FB94, // 1FB94..1FBFA; COMMON
+ 0x1FBFB, // 1FBFB..1FFFF; UNKNOWN
0x20000, // 20000..2A6DF; HAN
0x2A6E0, // 2A6E0..2A6FF; UNKNOWN
- 0x2A700, // 2A700..2B739; HAN
- 0x2B73A, // 2B73A..2B73F; UNKNOWN
- 0x2B740, // 2B740..2B81D; HAN
+ 0x2A700, // 2A700..2B81D; HAN
0x2B81E, // 2B81E..2B81F; UNKNOWN
- 0x2B820, // 2B820..2CEA1; HAN
- 0x2CEA2, // 2CEA2..2CEAF; UNKNOWN
+ 0x2B820, // 2B820..2CEAD; HAN
+ 0x2CEAE, // 2CEAE..2CEAF; UNKNOWN
0x2CEB0, // 2CEB0..2EBE0; HAN
0x2EBE1, // 2EBE1..2EBEF; UNKNOWN
0x2EBF0, // 2EBF0..2EE5D; HAN
@@ -7253,8 +7383,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
0x2FA1E, // 2FA1E..2FFFF; UNKNOWN
0x30000, // 30000..3134A; HAN
0x3134B, // 3134B..3134F; UNKNOWN
- 0x31350, // 31350..323AF; HAN
- 0x323B0, // 323B0..E0000; UNKNOWN
+ 0x31350, // 31350..33479; HAN
+ 0x3347A, // 3347A..E0000; UNKNOWN
0xE0001, // E0001 ; COMMON
0xE0002, // E0002..E001F; UNKNOWN
0xE0020, // E0020..E007F; COMMON
@@ -7359,9 +7489,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 085F
SYRIAC, // 0860..086A
UNKNOWN, // 086B..086F
- ARABIC, // 0870..088E
- UNKNOWN, // 088F
- ARABIC, // 0890..0891
+ ARABIC, // 0870..0891
UNKNOWN, // 0892..0896
ARABIC, // 0897..08E1
COMMON, // 08E2
@@ -7536,8 +7664,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
TELUGU, // 0C55..0C56
UNKNOWN, // 0C57
TELUGU, // 0C58..0C5A
- UNKNOWN, // 0C5B..0C5C
- TELUGU, // 0C5D
+ UNKNOWN, // 0C5B
+ TELUGU, // 0C5C..0C5D
UNKNOWN, // 0C5E..0C5F
TELUGU, // 0C60..0C63
UNKNOWN, // 0C64..0C65
@@ -7561,8 +7689,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
KANNADA, // 0CCA..0CCD
UNKNOWN, // 0CCE..0CD4
KANNADA, // 0CD5..0CD6
- UNKNOWN, // 0CD7..0CDC
- KANNADA, // 0CDD..0CDE
+ UNKNOWN, // 0CD7..0CDB
+ KANNADA, // 0CDC..0CDE
UNKNOWN, // 0CDF
KANNADA, // 0CE0..0CE3
UNKNOWN, // 0CE4..0CE5
@@ -7773,8 +7901,10 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 1A9A..1A9F
TAI_THAM, // 1AA0..1AAD
UNKNOWN, // 1AAE..1AAF
- INHERITED, // 1AB0..1ACE
- UNKNOWN, // 1ACF..1AFF
+ INHERITED, // 1AB0..1ADD
+ UNKNOWN, // 1ADE..1ADF
+ INHERITED, // 1AE0..1AEB
+ UNKNOWN, // 1AEC..1AFF
BALINESE, // 1B00..1B4C
UNKNOWN, // 1B4D
BALINESE, // 1B4E..1B7F
@@ -7866,8 +7996,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 208F
LATIN, // 2090..209C
UNKNOWN, // 209D..209F
- COMMON, // 20A0..20C0
- UNKNOWN, // 20C1..20CF
+ COMMON, // 20A0..20C1
+ UNKNOWN, // 20C2..20CF
INHERITED, // 20D0..20F0
UNKNOWN, // 20F1..20FF
COMMON, // 2100..2125
@@ -7890,9 +8020,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
BRAILLE, // 2800..28FF
COMMON, // 2900..2B73
UNKNOWN, // 2B74..2B75
- COMMON, // 2B76..2B95
- UNKNOWN, // 2B96
- COMMON, // 2B97..2BFF
+ COMMON, // 2B76..2BFF
GLAGOLITIC, // 2C00..2C5F
LATIN, // 2C60..2C7F
COPTIC, // 2C80..2CF3
@@ -7993,15 +8121,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
COMMON, // A700..A721
LATIN, // A722..A787
COMMON, // A788..A78A
- LATIN, // A78B..A7CD
- UNKNOWN, // A7CE..A7CF
- LATIN, // A7D0..A7D1
- UNKNOWN, // A7D2
- LATIN, // A7D3
- UNKNOWN, // A7D4
- LATIN, // A7D5..A7DC
- UNKNOWN, // A7DD..A7F1
- LATIN, // A7F2..A7FF
+ LATIN, // A78B..A7DC
+ UNKNOWN, // A7DD..A7F0
+ LATIN, // A7F1..A7FF
SYLOTI_NAGRI, // A800..A82C
UNKNOWN, // A82D..A82F
COMMON, // A830..A839
@@ -8089,15 +8211,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
HEBREW, // FB43..FB44
UNKNOWN, // FB45
HEBREW, // FB46..FB4F
- ARABIC, // FB50..FBC2
- UNKNOWN, // FBC3..FBD2
- ARABIC, // FBD3..FD3D
+ ARABIC, // FB50..FD3D
COMMON, // FD3E..FD3F
- ARABIC, // FD40..FD8F
- UNKNOWN, // FD90..FD91
- ARABIC, // FD92..FDC7
- UNKNOWN, // FDC8..FDCE
- ARABIC, // FDCF
+ ARABIC, // FD40..FDCF
UNKNOWN, // FDD0..FDEF
ARABIC, // FDF0..FDFF
INHERITED, // FE00..FE0F
@@ -8266,7 +8382,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
LYDIAN, // 10920..10939
UNKNOWN, // 1093A..1093E
LYDIAN, // 1093F
- UNKNOWN, // 10940..1097F
+ SIDETIC, // 10940..10959
+ UNKNOWN, // 1095A..1097F
MEROITIC_HIEROGLYPHS, // 10980..1099F
MEROITIC_CURSIVE, // 109A0..109B7
UNKNOWN, // 109B8..109BB
@@ -8336,9 +8453,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 10EAE..10EAF
YEZIDI, // 10EB0..10EB1
UNKNOWN, // 10EB2..10EC1
- ARABIC, // 10EC2..10EC4
- UNKNOWN, // 10EC5..10EFB
- ARABIC, // 10EFC..10EFF
+ ARABIC, // 10EC2..10EC7
+ UNKNOWN, // 10EC8..10ECF
+ ARABIC, // 10ED0..10ED8
+ UNKNOWN, // 10ED9..10EF9
+ ARABIC, // 10EFA..10EFF
OLD_SOGDIAN, // 10F00..10F27
UNKNOWN, // 10F28..10F2F
SOGDIAN, // 10F30..10F59
@@ -8508,7 +8627,9 @@ class Character implements java.io.Serializable, Comparable, Constabl
PAU_CIN_HAU, // 11AC0..11AF8
UNKNOWN, // 11AF9..11AFF
DEVANAGARI, // 11B00..11B09
- UNKNOWN, // 11B0A..11BBF
+ UNKNOWN, // 11B0A..11B5F
+ SHARADA, // 11B60..11B67
+ UNKNOWN, // 11B68..11BBF
SUNUWAR, // 11BC0..11BE1
UNKNOWN, // 11BE2..11BEF
SUNUWAR, // 11BF0..11BF9
@@ -8552,7 +8673,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
GUNJALA_GONDI, // 11D93..11D98
UNKNOWN, // 11D99..11D9F
GUNJALA_GONDI, // 11DA0..11DA9
- UNKNOWN, // 11DAA..11EDF
+ UNKNOWN, // 11DAA..11DAF
+ TOLONG_SIKI, // 11DB0..11DDB
+ UNKNOWN, // 11DDC..11DDF
+ TOLONG_SIKI, // 11DE0..11DE9
+ UNKNOWN, // 11DEA..11EDF
MAKASAR, // 11EE0..11EF8
UNKNOWN, // 11EF9..11EFF
KAWI, // 11F00..11F10
@@ -8612,7 +8737,11 @@ class Character implements java.io.Serializable, Comparable, Constabl
KIRAT_RAI, // 16D40..16D79
UNKNOWN, // 16D7A..16E3F
MEDEFAIDRIN, // 16E40..16E9A
- UNKNOWN, // 16E9B..16EFF
+ UNKNOWN, // 16E9B..16E9F
+ BERIA_ERFE, // 16EA0..16EB8
+ UNKNOWN, // 16EB9..16EBA
+ BERIA_ERFE, // 16EBB..16ED3
+ UNKNOWN, // 16ED4..16EFF
MIAO, // 16F00..16F4A
UNKNOWN, // 16F4B..16F4E
MIAO, // 16F4F..16F87
@@ -8624,16 +8753,16 @@ class Character implements java.io.Serializable, Comparable, Constabl
HAN, // 16FE2..16FE3
KHITAN_SMALL_SCRIPT, // 16FE4
UNKNOWN, // 16FE5..16FEF
- HAN, // 16FF0..16FF1
- UNKNOWN, // 16FF2..16FFF
- TANGUT, // 17000..187F7
- UNKNOWN, // 187F8..187FF
- TANGUT, // 18800..18AFF
+ HAN, // 16FF0..16FF6
+ UNKNOWN, // 16FF7..16FFF
+ TANGUT, // 17000..18AFF
KHITAN_SMALL_SCRIPT, // 18B00..18CD5
UNKNOWN, // 18CD6..18CFE
KHITAN_SMALL_SCRIPT, // 18CFF
- TANGUT, // 18D00..18D08
- UNKNOWN, // 18D09..1AFEF
+ TANGUT, // 18D00..18D1E
+ UNKNOWN, // 18D1F..18D7F
+ TANGUT, // 18D80..18DF2
+ UNKNOWN, // 18DF3..1AFEF
KATAKANA, // 1AFF0..1AFF3
UNKNOWN, // 1AFF4
KATAKANA, // 1AFF5..1AFFB
@@ -8665,10 +8794,14 @@ class Character implements java.io.Serializable, Comparable, Constabl
DUPLOYAN, // 1BC9C..1BC9F
COMMON, // 1BCA0..1BCA3
UNKNOWN, // 1BCA4..1CBFF
- COMMON, // 1CC00..1CCF9
- UNKNOWN, // 1CCFA..1CCFF
+ COMMON, // 1CC00..1CCFC
+ UNKNOWN, // 1CCFD..1CCFF
COMMON, // 1CD00..1CEB3
- UNKNOWN, // 1CEB4..1CEFF
+ UNKNOWN, // 1CEB4..1CEB9
+ COMMON, // 1CEBA..1CED0
+ UNKNOWN, // 1CED1..1CEDF
+ COMMON, // 1CEE0..1CEF0
+ UNKNOWN, // 1CEF1..1CEFF
INHERITED, // 1CF00..1CF2D
UNKNOWN, // 1CF2E..1CF2F
INHERITED, // 1CF30..1CF46
@@ -8783,7 +8916,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
OL_ONAL, // 1E5D0..1E5FA
UNKNOWN, // 1E5FB..1E5FE
OL_ONAL, // 1E5FF
- UNKNOWN, // 1E600..1E7DF
+ UNKNOWN, // 1E600..1E6BF
+ TAI_YO, // 1E6C0..1E6DE
+ UNKNOWN, // 1E6DF
+ TAI_YO, // 1E6E0..1E6F5
+ UNKNOWN, // 1E6F6..1E6FD
+ TAI_YO, // 1E6FE..1E6FF
+ UNKNOWN, // 1E700..1E7DF
ETHIOPIC, // 1E7E0..1E7E6
UNKNOWN, // 1E7E7
ETHIOPIC, // 1E7E8..1E7EB
@@ -8900,15 +9039,13 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 1F252..1F25F
COMMON, // 1F260..1F265
UNKNOWN, // 1F266..1F2FF
- COMMON, // 1F300..1F6D7
- UNKNOWN, // 1F6D8..1F6DB
+ COMMON, // 1F300..1F6D8
+ UNKNOWN, // 1F6D9..1F6DB
COMMON, // 1F6DC..1F6EC
UNKNOWN, // 1F6ED..1F6EF
COMMON, // 1F6F0..1F6FC
UNKNOWN, // 1F6FD..1F6FF
- COMMON, // 1F700..1F776
- UNKNOWN, // 1F777..1F77A
- COMMON, // 1F77B..1F7D9
+ COMMON, // 1F700..1F7D9
UNKNOWN, // 1F7DA..1F7DF
COMMON, // 1F7E0..1F7EB
UNKNOWN, // 1F7EC..1F7EF
@@ -8927,35 +9064,37 @@ class Character implements java.io.Serializable, Comparable, Constabl
COMMON, // 1F8B0..1F8BB
UNKNOWN, // 1F8BC..1F8BF
COMMON, // 1F8C0..1F8C1
- UNKNOWN, // 1F8C2..1F8FF
- COMMON, // 1F900..1FA53
- UNKNOWN, // 1FA54..1FA5F
+ UNKNOWN, // 1F8C2..1F8CF
+ COMMON, // 1F8D0..1F8D8
+ UNKNOWN, // 1F8D9..1F8FF
+ COMMON, // 1F900..1FA57
+ UNKNOWN, // 1FA58..1FA5F
COMMON, // 1FA60..1FA6D
UNKNOWN, // 1FA6E..1FA6F
COMMON, // 1FA70..1FA7C
UNKNOWN, // 1FA7D..1FA7F
- COMMON, // 1FA80..1FA89
- UNKNOWN, // 1FA8A..1FA8E
- COMMON, // 1FA8F..1FAC6
- UNKNOWN, // 1FAC7..1FACD
- COMMON, // 1FACE..1FADC
+ COMMON, // 1FA80..1FA8A
+ UNKNOWN, // 1FA8B..1FA8D
+ COMMON, // 1FA8E..1FAC6
+ UNKNOWN, // 1FAC7
+ COMMON, // 1FAC8
+ UNKNOWN, // 1FAC9..1FACC
+ COMMON, // 1FACD..1FADC
UNKNOWN, // 1FADD..1FADE
- COMMON, // 1FADF..1FAE9
- UNKNOWN, // 1FAEA..1FAEF
- COMMON, // 1FAF0..1FAF8
+ COMMON, // 1FADF..1FAEA
+ UNKNOWN, // 1FAEB..1FAEE
+ COMMON, // 1FAEF..1FAF8
UNKNOWN, // 1FAF9..1FAFF
COMMON, // 1FB00..1FB92
UNKNOWN, // 1FB93
- COMMON, // 1FB94..1FBF9
- UNKNOWN, // 1FBFA..1FFFF
+ COMMON, // 1FB94..1FBFA
+ UNKNOWN, // 1FBFB..1FFFF
HAN, // 20000..2A6DF
UNKNOWN, // 2A6E0..2A6FF
- HAN, // 2A700..2B739
- UNKNOWN, // 2B73A..2B73F
- HAN, // 2B740..2B81D
+ HAN, // 2A700..2B81D
UNKNOWN, // 2B81E..2B81F
- HAN, // 2B820..2CEA1
- UNKNOWN, // 2CEA2..2CEAF
+ HAN, // 2B820..2CEAD
+ UNKNOWN, // 2CEAE..2CEAF
HAN, // 2CEB0..2EBE0
UNKNOWN, // 2EBE1..2EBEF
HAN, // 2EBF0..2EE5D
@@ -8964,8 +9103,8 @@ class Character implements java.io.Serializable, Comparable, Constabl
UNKNOWN, // 2FA1E..2FFFF
HAN, // 30000..3134A
UNKNOWN, // 3134B..3134F
- HAN, // 31350..323AF
- UNKNOWN, // 323B0..E0000
+ HAN, // 31350..33479
+ UNKNOWN, // 3347A..E0000
COMMON, // E0001
UNKNOWN, // E0002..E001F
COMMON, // E0020..E007F
@@ -8989,6 +9128,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("BASS", BASSA_VAH);
aliases.put("BATK", BATAK);
aliases.put("BENG", BENGALI);
+ aliases.put("BERF", BERIA_ERFE);
aliases.put("BHKS", BHAIKSUKI);
aliases.put("BOPO", BOPOMOFO);
aliases.put("BRAH", BRAHMI);
@@ -9107,6 +9247,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("SHAW", SHAVIAN);
aliases.put("SHRD", SHARADA);
aliases.put("SIDD", SIDDHAM);
+ aliases.put("SIDT", SIDETIC);
aliases.put("SIND", KHUDAWADI);
aliases.put("SINH", SINHALA);
aliases.put("SOGD", SOGDIAN);
@@ -9124,6 +9265,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("TAML", TAMIL);
aliases.put("TANG", TANGUT);
aliases.put("TAVT", TAI_VIET);
+ aliases.put("TAYO", TAI_YO);
aliases.put("TELU", TELUGU);
aliases.put("TFNG", TIFINAGH);
aliases.put("TGLG", TAGALOG);
@@ -9133,6 +9275,7 @@ class Character implements java.io.Serializable, Comparable, Constabl
aliases.put("TIRH", TIRHUTA);
aliases.put("TNSA", TANGSA);
aliases.put("TODR", TODHRI);
+ aliases.put("TOLS", TOLONG_SIKI);
aliases.put("TOTO", TOTO);
aliases.put("TUTG", TULU_TIGALARI);
aliases.put("UGAR", UGARITIC);
diff --git a/src/java.base/share/classes/java/lang/InterruptedException.java b/src/java.base/share/classes/java/lang/InterruptedException.java
index ef13e5f94e3..e8cf3d28bc5 100644
--- a/src/java.base/share/classes/java/lang/InterruptedException.java
+++ b/src/java.base/share/classes/java/lang/InterruptedException.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1995, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,24 +26,19 @@
package java.lang;
/**
- * Thrown when a thread is waiting, sleeping, or otherwise occupied,
- * and the thread is interrupted, either before or during the activity.
- * Occasionally a method may wish to test whether the current
- * thread has been interrupted, and if so, to immediately throw
- * this exception. The following code can be used to achieve
- * this effect:
- * {@snippet lang=java :
- * if (Thread.interrupted()) // Clears interrupted status!
- * throw new InterruptedException();
- * }
+ * Thrown when a thread executing a blocking method is {@linkplain Thread#interrupt()
+ * interrupted}. {@link Thread#sleep(long) Thread.sleep}, {@link Object#wait()
+ * Object.wait} and many other blocking methods throw this exception if interrupted.
+ *
+ * Blocking methods that throw {@code InterruptedException} clear the thread's
+ * interrupted status before throwing the exception. Code that catches {@code
+ * InterruptedException} should rethrow the exception, or restore the current thread's
+ * interrupted status, with {@link Thread#currentThread()
+ * Thread.currentThread()}.{@link Thread#interrupt() interrupt()}, before continuing
+ * normally or handling it by throwing another type of exception.
*
* @author Frank Yellin
- * @see java.lang.Object#wait()
- * @see java.lang.Object#wait(long)
- * @see java.lang.Object#wait(long, int)
- * @see java.lang.Thread#sleep(long)
- * @see java.lang.Thread#interrupt()
- * @see java.lang.Thread#interrupted()
+ * @see Thread##thread-interruption Thread Interruption
* @since 1.0
*/
public class InterruptedException extends Exception {
diff --git a/src/java.base/share/classes/java/lang/LazyConstant.java b/src/java.base/share/classes/java/lang/LazyConstant.java
new file mode 100644
index 00000000000..34f3d754a10
--- /dev/null
+++ b/src/java.base/share/classes/java/lang/LazyConstant.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.lang;
+
+import jdk.internal.javac.PreviewFeature;
+import jdk.internal.lang.LazyConstantImpl;
+
+import java.io.Serializable;
+import java.util.*;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.Supplier;
+
+/**
+ * A lazy constant is a holder of contents that can be set at most once.
+ *
+ * A lazy constant is created using the factory method
+ * {@linkplain LazyConstant#of(Supplier) LazyConstant.of({@code })}.
+ * When created, the lazy constant is not initialized, meaning it has no contents.
+ * The lazy constant (of type {@code T}) can then be initialized
+ * (and its contents retrieved) by calling {@linkplain #get() get()}. The first time
+ * {@linkplain #get() get()} is called, the underlying computing function
+ * (provided at construction) will be invoked and the result will be used to initialize
+ * the constant. Once a lazy constant is initialized, its contents can never change
+ * and will be retrieved over and over again upon subsequent {@linkplain #get() get()}
+ * invocations.
+ *
+ * Consider the following example where a lazy constant field "{@code logger}" holds
+ * an object of type {@code Logger}:
+ *
+ * {@snippet lang = java:
+ * public class Component {
+ *
+ * // Creates a new uninitialized lazy constant
+ * private final LazyConstant<Logger> logger =
+ * // @link substring="of" target="#of" :
+ * LazyConstant.of( () -> Logger.create(Component.class) );
+ *
+ * public void process() {
+ * logger.get().info("Process started");
+ * // ...
+ * }
+ * }
+ *}
+ *
+ * Initially, the lazy constant is not initialized. When {@code logger.get()}
+ * is first invoked, it evaluates the computing function and initializes the constant to
+ * the result; the result is then returned to the client. Hence, {@linkplain #get() get()}
+ * guarantees that the constant is initialized before it returns, barring
+ * any exceptions.
+ *
+ * Furthermore, {@linkplain #get() get()} guarantees that, out of several threads trying to
+ * invoke the computing function simultaneously, {@linkplain ##thread-safety only one is
+ * ever selected} for computation. This property is crucial as evaluation of the computing
+ * function may have side effects, for example, the call above to {@code Logger.create()}
+ * may result in storage resources being prepared.
+ *
+ *
Exception handling
+ * If the computing function returns {@code null}, a {@linkplain NullPointerException}
+ * is thrown. Hence, a lazy constant can never hold a {@code null} value. Clients who
+ * want to use a nullable constant can wrap the value into an {@linkplain Optional} holder.
+ *
+ * If the computing function recursively invokes itself (directly or indirectly via
+ * the lazy constant), an {@linkplain IllegalStateException} is thrown, and the lazy
+ * constant is not initialized.
+ *
+ *
Composing lazy constants
+ * A lazy constant can depend on other lazy constants, forming a dependency graph
+ * that can be lazily computed but where access to individual elements can still be
+ * performant. In the following example, a single {@code Foo} and a {@code Bar}
+ * instance (that is dependent on the {@code Foo} instance) are lazily created, both of
+ * which are held by lazy constants:
+ *
+ * {@snippet lang = java:
+ * public final class DependencyUtil {
+ *
+ * private DependencyUtil() {}
+ *
+ * public static class Foo {
+ * // ...
+ * }
+ *
+ * public static class Bar {
+ * public Bar(Foo foo) {
+ * // ...
+ * }
+ * }
+ *
+ * private static final LazyConstant<Foo> FOO = LazyConstant.of( Foo::new );
+ * private static final LazyConstant<Bar> BAR = LazyConstant.of( () -> new Bar(FOO.get()) );
+ *
+ * public static Foo foo() {
+ * return FOO.get();
+ * }
+ *
+ * public static Bar bar() {
+ * return BAR.get();
+ * }
+ *
+ * }
+ *}
+ * Calling {@code BAR.get()} will create the {@code Bar} singleton if it is not already
+ * created. Upon such a creation, a dependent {@code Foo} will first be created if
+ * the {@code Foo} does not already exist.
+ *
+ * <h2 id="thread-safety">Thread Safety</h2>
+ * A lazy constant is guaranteed to be initialized atomically and at most once. If
+ * competing threads are racing to initialize a lazy constant, only one updating thread
+ * runs the computing function (which runs on the caller's thread and is hereafter denoted
+ * the computing thread), while the other threads are blocked until the constant
+ * is initialized, after which the other threads observe the lazy constant is initialized
+ * and leave the constant unchanged and will never invoke any computation.
+ *
+ * The invocation of the computing function and the resulting initialization of
+ * the constant {@linkplain java.util.concurrent##MemoryVisibility happens-before}
+ * the initialized constant's content is read. Hence, the initialized constant's content,
+ * including any {@code final} fields of any newly created objects, is safely published.
+ *
+ * Thread interruption does not cancel the initialization of a lazy constant. In other
+ * words, if the computing thread is interrupted, {@code LazyConstant::get} doesn't clear
+ * the interrupted thread’s status, nor does it throw an {@linkplain InterruptedException}.
+ *
+ * If the computing function blocks indefinitely, other threads operating on this
+ * lazy constant may block indefinitely; no timeouts or cancellations are provided.
+ *
+ *
+ * The contents of a lazy constant can never change after the lazy constant has been
+ * initialized. Therefore, a JVM implementation may, for an initialized lazy constant,
+ * elide all future reads of that lazy constant's contents and instead use the contents
+ * that has been previously observed. We call this optimization constant folding.
+ * This is only possible if there is a direct reference from a {@code static final} field
+ * to a lazy constant or if there is a chain from a {@code static final} field -- via one
+ * or more trusted fields (i.e., {@code static final} fields,
+ * {@linkplain Record record} fields, or final instance fields in hidden classes) --
+ * to a lazy constant.
+ *
+ * <h2 id="miscellaneous">Miscellaneous</h2>
+ * Except for {@linkplain Object#equals(Object) equals(obj)} and
+ * {@linkplain #orElse(Object) orElse(other)} parameters, all method parameters
+ * must be non-null, or a {@link NullPointerException} will be thrown.
+ *
+ * @apiNote Once a lazy constant is initialized, its contents cannot ever be removed.
+ * This can be a source of an unintended memory leak. More specifically,
+ * a lazy constant {@linkplain java.lang.ref##reachability strongly references}
+ * its contents. Hence, the contents of a lazy constant will be reachable as long
+ * as the lazy constant itself is reachable.
+ *
+ * While it's possible to store an array inside a lazy constant, doing so will
+ * not result in improved access performance of the array elements. Instead, a
+ * {@linkplain List#ofLazy(int, IntFunction) lazy list} of arbitrary depth can
+ * be used, which provides constant components.
+ *
+ * The {@code LazyConstant} type is not {@link Serializable}.
+ *
+ * Use in static initializers may interact with class initialization order;
+ * cyclic initialization may result in initialization errors as described
+ * in section {@jls 12.4} of The Java Language Specification.
+ *
+ * @implNote
+ * A lazy constant is free to synchronize on itself. Hence, care must be
+ * taken when directly or indirectly synchronizing on a lazy constant.
+ * A lazy constant is unmodifiable but its contents may or may not be
+ * immutable (e.g., it may hold an {@linkplain ArrayList}).
+ *
+ * @param <T> type of the constant
+ *
+ * @since 26
+ *
+ * @see Optional
+ * @see Supplier
+ * @see List#ofLazy(int, IntFunction)
+ * @see Map#ofLazy(Set, Function)
+ * @jls 12.4 Initialization of Classes and Interfaces
+ * @jls 17.4.5 Happens-before Order
+ */
+@PreviewFeature(feature = PreviewFeature.Feature.LAZY_CONSTANTS)
+public sealed interface LazyConstant<T>
+ extends Supplier<T>
+ permits LazyConstantImpl {
+
+ /**
+ * {@return the contents of this lazy constant if initialized, otherwise,
+ * returns {@code other}}
+ *
+ * This method never triggers initialization of this lazy constant and will observe
+ * initialization by other threads atomically (i.e., it returns the contents
+ * if and only if the initialization has already completed).
+ *
+ * @param other value to return if the content is not initialized
+ * (can be {@code null})
+ */
+ T orElse(T other);
+
+ /**
+ * {@return the contents of this initialized constant. If not initialized, first
+ * computes and initializes this constant using the computing function}
+ *
+ * After this method returns successfully, the constant is guaranteed to be
+ * initialized.
+ *
+ * If the computing function throws, the throwable is relayed to the caller and
+ * the lazy constant remains uninitialized; a subsequent call to get() may then
+ * attempt the computation again.
+ */
+ T get();
+
+ /**
+ * {@return {@code true} if the constant is initialized, {@code false} otherwise}
+ *
+ * This method never triggers initialization of this lazy constant and will observe
+ * changes in the initialization state made by other threads atomically.
+ */
+ boolean isInitialized();
+
+ // Object methods
+
+ /**
+ * {@return if this lazy constant is the same as the provided {@code obj}}
+ *
+ * In other words, equals compares the identity of this lazy constant and {@code obj}
+ * to determine equality. Hence, two lazy constants with the same contents are
+ * not equal.
+ *
+ * This method never triggers initialization of this lazy constant.
+ */
+ @Override
+ boolean equals(Object obj);
+
+ /**
+ * {@return the {@linkplain System#identityHashCode(Object) identity hash code} for
+ * this lazy constant}
+ *
+ * This method never triggers initialization of this lazy constant.
+ */
+ @Override
+ int hashCode();
+
+ /**
+ * {@return a string suitable for debugging}
+ *
+ * This method never triggers initialization of this lazy constant and will observe
+ * initialization by other threads atomically (i.e., it observes the
+ * contents if and only if the initialization has already completed).
+ *
+ * If this lazy constant is initialized, an implementation-dependent string
+ * containing the {@linkplain Object#toString()} of the
+ * contents will be returned; otherwise, an implementation-dependent string is
+ * returned that indicates this lazy constant is not yet initialized.
+ */
+ @Override
+ String toString();
+
+ // Factory
+
+ /**
+ * {@return a lazy constant whose contents is to be computed later via the provided
+ * {@code computingFunction}}
+ *
+ * The returned lazy constant strongly references the provided
+ * {@code computingFunction} at least until initialization completes successfully.
+ *
+ * If the provided computing function is already an instance of
+ * {@code LazyConstant}, the method is free to return the provided computing function
+ * directly.
+ *
+ * @implNote after initialization completes successfully, the computing function is
+ * no longer strongly referenced and becomes eligible for
+ * garbage collection.
+ *
+ * @param computingFunction in the form of a {@linkplain Supplier} to be used
+ * to initialize the constant
+ * @param <T> type of the constant
+ *
+ */
+ @SuppressWarnings("unchecked")
+ static <T> LazyConstant<T> of(Supplier<? extends T> computingFunction) {
+ Objects.requireNonNull(computingFunction);
+ if (computingFunction instanceof LazyConstant<? extends T> lc) {
+ return (LazyConstant<T>) lc;
+ }
+ return LazyConstantImpl.ofLazy(computingFunction);
+ }
+
+}
diff --git a/src/java.base/share/classes/java/lang/Module.java b/src/java.base/share/classes/java/lang/Module.java
index 065e1ac4620..cd2b8095ee4 100644
--- a/src/java.base/share/classes/java/lang/Module.java
+++ b/src/java.base/share/classes/java/lang/Module.java
@@ -37,6 +37,7 @@ import java.lang.module.ModuleDescriptor.Version;
import java.lang.module.ResolvedModule;
import java.lang.reflect.AccessFlag;
import java.lang.reflect.AnnotatedElement;
+import java.lang.reflect.Field;
import java.net.URI;
import java.net.URL;
import java.security.CodeSource;
@@ -115,6 +116,10 @@ public final class Module implements AnnotatedElement {
@Stable
private boolean enableNativeAccess;
+ // true if this module is allowed to mutate final instance fields
+ @Stable
+ private boolean enableFinalMutation;
+
/**
* Creates a new named Module. The resulting Module will be defined to the
* VM but will not read any other modules, will not have any exports setup
@@ -262,7 +267,6 @@ public final class Module implements AnnotatedElement {
* in the outer Module class as that would create a circular initializer dependency.
*/
private static final class EnableNativeAccess {
-
private EnableNativeAccess() {}
private static final Unsafe UNSAFE = Unsafe.getUnsafe();
@@ -331,12 +335,52 @@ public final class Module implements AnnotatedElement {
}
/**
- * Update all unnamed modules to allow access to restricted methods.
+ * Enable code in all unnamed modules to access restricted methods.
*/
- static void implAddEnableNativeAccessToAllUnnamed() {
+ static void addEnableNativeAccessToAllUnnamed() {
EnableNativeAccess.trySetEnableNativeAccess(ALL_UNNAMED_MODULE);
}
+ /**
+ * This class exists to avoid using Unsafe during early initialization of Module.
+ */
+ private static final class EnableFinalMutation {
+ private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+ private static final long ENABLE_FINAL_MUTATION_OFFSET =
+ UNSAFE.objectFieldOffset(Module.class, "enableFinalMutation");
+
+ private static boolean isEnableFinalMutation(Module module) {
+ return UNSAFE.getBooleanVolatile(module, ENABLE_FINAL_MUTATION_OFFSET);
+ }
+
+ private static boolean tryEnableFinalMutation(Module module) {
+ return UNSAFE.compareAndSetBoolean(module, ENABLE_FINAL_MUTATION_OFFSET, false, true);
+ }
+ }
+
+ /**
+ * Enable code in all unnamed modules to mutate final instance fields.
+ */
+ static void addEnableFinalMutationToAllUnnamed() {
+ EnableFinalMutation.tryEnableFinalMutation(ALL_UNNAMED_MODULE);
+ }
+
+ /**
+ * Enable code in this named module to mutate final instance fields.
+ */
+ boolean tryEnableFinalMutation() {
+ Module m = isNamed() ? this : ALL_UNNAMED_MODULE;
+ return EnableFinalMutation.tryEnableFinalMutation(m);
+ }
+
+ /**
+ * Return true if code in this module is allowed to mutate final instance fields.
+ */
+ boolean isFinalMutationEnabled() {
+ Module m = isNamed() ? this : ALL_UNNAMED_MODULE;
+ return EnableFinalMutation.isEnableFinalMutation(m);
+ }
+
// --
// special Module to mean "all unnamed modules"
@@ -718,8 +762,50 @@ public final class Module implements AnnotatedElement {
}
/**
- * Returns {@code true} if this module exports or opens a package to
- * the given module via its module declaration or CLI options.
+ * Returns {@code true} if this module statically exports a package to the given module.
+ * If the package is exported to the given module via {@code addExports} then this method
+ * returns {@code false}.
+ */
+ boolean isStaticallyExported(String pn, Module other) {
+ return isStaticallyExportedOrOpened(pn, other, false);
+ }
+
+ /**
+ * Returns {@code true} if this module statically opens a package to the given module.
+ * If the package is opened to the given module via {@code addOpens} then this method
+ * returns {@code false}.
+ */
+ boolean isStaticallyOpened(String pn, Module other) {
+ return isStaticallyExportedOrOpened(pn, other, true);
+ }
+
+ /**
+ * Returns {@code true} if this module exports or opens a package to the
+ * given module via its module declaration or CLI options.
+ */
+ private boolean isStaticallyExportedOrOpened(String pn, Module other, boolean open) {
+ // all packages in unnamed modules are exported and open
+ if (!isNamed())
+ return true;
+
+ // all packages are exported/open to self
+ if (other == this && descriptor.packages().contains(pn))
+ return true;
+
+ // all packages in open and automatic modules are exported/open
+ if (descriptor.isOpen() || descriptor.isAutomatic())
+ return descriptor.packages().contains(pn);
+
+ // exported/opened via module descriptor
+ if (isExplicitlyExportedOrOpened(pn, other, open))
+ return true;
+
+ return false;
+ }
+
+ /**
+ * Returns {@code true} if this module exports or opens a package to the
+ * given module via its module declaration or CLI options.
*/
private boolean isExplicitlyExportedOrOpened(String pn, Module other, boolean open) {
// test if package is open to everyone or
@@ -818,11 +904,16 @@ public final class Module implements AnnotatedElement {
return isReflectivelyExportedOrOpened(pn, other, true);
}
-
/**
* If the caller's module is this module then update this module to export
* the given package to the given module.
*
+ * Exporting a package with this method does not allow the given module to
+ * {@linkplain Field#set(Object, Object) reflectively set} or {@linkplain
+ * java.lang.invoke.MethodHandles.Lookup#unreflectSetter(Field) obtain a method
+ * handle with write access} to a public final field declared in a public class
+ * in the package.
+ *
*
This method has no effect if the package is already exported (or
* open) to the given module.
*
@@ -860,21 +951,27 @@ public final class Module implements AnnotatedElement {
if (caller != this) {
throw new IllegalCallerException(caller + " != " + this);
}
- implAddExportsOrOpens(pn, other, /*open*/false, /*syncVM*/true);
+ implAddExports(pn, other);
}
return this;
}
/**
- * If this module has opened a package to at least the caller
- * module then update this module to open the package to the given module.
- * Opening a package with this method allows all types in the package,
+ * If this module has opened the given package to at least the caller
+ * module, then update this module to also open the package to the given module.
+ *
+ * Opening a package with this method allows all types in the package,
* and all their members, not just public types and their public members,
- * to be reflected on by the given module when using APIs that support
- * private access or a way to bypass or suppress default Java language
+ * to be reflected on by the given module when using APIs that either support
+ * private access or provide a way to bypass or suppress Java language
* access control checks.
*
+ * <p> Opening a package with this method does not allow the given module to
+ * {@linkplain Field#set(Object, Object) reflectively set} or {@linkplain
+ * java.lang.invoke.MethodHandles.Lookup#unreflectSetter(Field) obtain a method
+ * handle with write access} to a final field declared in a class in the package.
+ *
* <p> This method has no effect if the package is already open
This method has no effect if the package is already open
* to the given module.
*
@@ -913,7 +1010,7 @@ public final class Module implements AnnotatedElement {
Module caller = getCallerModule(Reflection.getCallerClass());
if (caller != this && (caller == null || !isOpen(pn, caller)))
throw new IllegalCallerException(pn + " is not open to " + caller);
- implAddExportsOrOpens(pn, other, /*open*/true, /*syncVM*/true);
+ implAddOpens(pn, other);
}
return this;
@@ -923,28 +1020,29 @@ public final class Module implements AnnotatedElement {
/**
* Updates this module to export a package unconditionally.
*
- * @apiNote This method is for JDK tests only.
+ * @apiNote Used by Proxy and other dynamic modules.
*/
void implAddExports(String pn) {
- implAddExportsOrOpens(pn, Module.EVERYONE_MODULE, false, true);
+ implAddExportsOrOpens(pn, Module.EVERYONE_MODULE, false, true, true);
}
/**
* Updates this module to export a package to another module.
*
- * @apiNote Used by Instrumentation::redefineModule and --add-exports
+ * @apiNote Used by addExports, Instrumentation::redefineModule, and --add-exports
*/
void implAddExports(String pn, Module other) {
- implAddExportsOrOpens(pn, other, false, true);
+ implAddExportsOrOpens(pn, other, false, VM.isBooted(), true);
}
/**
* Updates this module to export a package to all unnamed modules.
*
- * @apiNote Used by the --add-exports command line option.
+ * @apiNote Used by the --add-exports command line option and the launcher when
+ * an executable JAR file has the "Add-Exports" attribute in its main manifest.
*/
void implAddExportsToAllUnnamed(String pn) {
- implAddExportsOrOpens(pn, Module.ALL_UNNAMED_MODULE, false, true);
+ implAddExportsOrOpens(pn, Module.ALL_UNNAMED_MODULE, false, false, true);
}
/**
@@ -954,7 +1052,7 @@ public final class Module implements AnnotatedElement {
* @apiNote This method is for VM white-box testing.
*/
void implAddExportsNoSync(String pn) {
- implAddExportsOrOpens(pn.replace('/', '.'), Module.EVERYONE_MODULE, false, false);
+ implAddExportsOrOpens(pn.replace('/', '.'), Module.EVERYONE_MODULE, false, true, false);
}
/**
@@ -964,7 +1062,7 @@ public final class Module implements AnnotatedElement {
* @apiNote This method is for VM white-box testing.
*/
void implAddExportsNoSync(String pn, Module other) {
- implAddExportsOrOpens(pn.replace('/', '.'), other, false, false);
+ implAddExportsOrOpens(pn.replace('/', '.'), other, false, true, false);
}
/**
@@ -973,35 +1071,40 @@ public final class Module implements AnnotatedElement {
* @apiNote This method is for JDK tests only.
*/
void implAddOpens(String pn) {
- implAddExportsOrOpens(pn, Module.EVERYONE_MODULE, true, true);
+ implAddExportsOrOpens(pn, Module.EVERYONE_MODULE, true, true, true);
}
/**
* Updates this module to open a package to another module.
*
- * @apiNote Used by Instrumentation::redefineModule and --add-opens
+ * @apiNote Used by addOpens, Instrumentation::redefineModule, and --add-opens
*/
void implAddOpens(String pn, Module other) {
- implAddExportsOrOpens(pn, other, true, true);
+ implAddExportsOrOpens(pn, other, true, VM.isBooted(), true);
}
/**
* Updates this module to open a package to all unnamed modules.
*
- * @apiNote Used by the --add-opens command line option.
+ * @apiNote Used by the --add-opens command line option and the launcher when
+ * an executable JAR file has the "Add-Opens" attribute in its main manifest.
*/
void implAddOpensToAllUnnamed(String pn) {
- implAddExportsOrOpens(pn, Module.ALL_UNNAMED_MODULE, true, true);
+ implAddExportsOrOpens(pn, Module.ALL_UNNAMED_MODULE, true, false, true);
}
/**
* Updates a module to export or open a module to another module.
- *
- * If {@code syncVM} is {@code true} then the VM is notified.
+ * @param pn package name
+ * @param other the module to export/open the package to
+ * @param open true to open, false to export
+ * @param reflectively true if exported/opened reflectively
+ * @param syncVM true to update the VM
*/
private void implAddExportsOrOpens(String pn,
Module other,
boolean open,
+ boolean reflectively,
boolean syncVM) {
Objects.requireNonNull(other);
Objects.requireNonNull(pn);
@@ -1031,7 +1134,7 @@ public final class Module implements AnnotatedElement {
}
}
- if (VM.isBooted()) {
+ if (reflectively) {
// add package name to ReflectionData.exports if absent
Map<String, Boolean> map = ReflectionData.exports
.computeIfAbsent(this, other,
diff --git a/src/java.base/share/classes/java/lang/ModuleLayer.java b/src/java.base/share/classes/java/lang/ModuleLayer.java
index 5dfd93796d2..9d922f787a6 100644
--- a/src/java.base/share/classes/java/lang/ModuleLayer.java
+++ b/src/java.base/share/classes/java/lang/ModuleLayer.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@ package java.lang;
import java.lang.module.Configuration;
import java.lang.module.ModuleDescriptor;
import java.lang.module.ResolvedModule;
+import java.lang.reflect.Field;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
@@ -252,6 +253,12 @@ public final class ModuleLayer {
* module {@code target}. This method is a no-op if {@code source}
* already exports the package to at least {@code target}.
*
+ * Exporting a package with this method does not allow the target module to
+ * {@linkplain Field#set(Object, Object) reflectively set} or {@linkplain
+ * java.lang.invoke.MethodHandles.Lookup#unreflectSetter(Field) obtain a method
+ * handle with write access} to a public final field declared in a public class
+ * in the package.
+ *
* @param source
* The source module
* @param pn
@@ -278,6 +285,11 @@ public final class ModuleLayer {
* module {@code target}. This method is a no-op if {@code source}
* already opens the package to at least {@code target}.
*
+ * <p> Opening a package with this method does not allow the target module
+ * to {@linkplain Field#set(Object, Object) reflectively set} or {@linkplain
+ * java.lang.invoke.MethodHandles.Lookup#unreflectSetter(Field) obtain a method
+ * handle with write access} to a final field declared in a class in the package.
+ *
* @param source
* The source module
* @param pn
diff --git a/src/java.base/share/classes/java/lang/StableValue.java b/src/java.base/share/classes/java/lang/StableValue.java
deleted file mode 100644
index 1815cb1a5b1..00000000000
--- a/src/java.base/share/classes/java/lang/StableValue.java
+++ /dev/null
@@ -1,756 +0,0 @@
-/*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package java.lang;
-
-import jdk.internal.access.SharedSecrets;
-import jdk.internal.javac.PreviewFeature;
-import jdk.internal.lang.stable.StableEnumFunction;
-import jdk.internal.lang.stable.StableFunction;
-import jdk.internal.lang.stable.StableIntFunction;
-import jdk.internal.lang.stable.StableSupplier;
-import jdk.internal.lang.stable.StableUtil;
-import jdk.internal.lang.stable.StableValueImpl;
-
-import java.io.Serializable;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
-import java.util.Objects;
-import java.util.RandomAccess;
-import java.util.Set;
-import java.util.function.Function;
-import java.util.function.IntFunction;
-import java.util.function.Supplier;
-
-/**
- * A stable value is a holder of contents that can be set at most once.
- *
- * A {@code StableValue} is typically created using the factory method
- * {@linkplain StableValue#of() {@code StableValue.of()}}. When created this way,
- * the stable value is unset, which means it holds no contents.
- * Its contents, of type {@code T}, can be set by calling
- * {@linkplain #trySet(Object) trySet()}, {@linkplain #setOrThrow(Object) setOrThrow()},
- * or {@linkplain #orElseSet(Supplier) orElseSet()}. Once set, the contents
- * can never change and can be retrieved by calling {@linkplain #orElseThrow() orElseThrow()}
- * , {@linkplain #orElse(Object) orElse()}, or {@linkplain #orElseSet(Supplier) orElseSet()}.
- *
- * Consider the following example where a stable value field "{@code logger}" is a
- * shallowly immutable holder of contents of type {@code Logger} and that is initially
- * created as unset, which means it holds no contents. Later in the example, the
- * state of the "{@code logger}" field is checked and if it is still unset,
- * the contents is set:
- *
- * {@snippet lang = java:
- * public class Component {
- *
- * // Creates a new unset stable value with no contents
- * // @link substring="of" target="#of" :
- * private final StableValue<Logger> logger = StableValue.of();
- *
- * private Logger getLogger() {
- * if (!logger.isSet()) {
- * logger.trySet(Logger.create(Component.class));
- * }
- * return logger.orElseThrow();
- * }
- *
- * public void process() {
- * getLogger().info("Process started");
- * // ...
- * }
- * }
- *}
- *
- * If {@code getLogger()} is called from several threads, several instances of
- * {@code Logger} might be created. However, the contents can only be set at most once
- * meaning the first writer wins.
- *
- * In order to guarantee that, even under races, only one instance of {@code Logger} is
- * ever created, the {@linkplain #orElseSet(Supplier) orElseSet()} method can be used
- * instead, where the contents are lazily computed, and atomically set, via a
- * {@linkplain Supplier supplier}. In the example below, the supplier is provided in the
- * form of a lambda expression:
- *
- * {@snippet lang = java:
- * public class Component {
- *
- * // Creates a new unset stable value with no contents
- * // @link substring="of" target="#of" :
- * private final StableValue<Logger> logger = StableValue.of();
- *
- * private Logger getLogger() {
- * return logger.orElseSet( () -> Logger.create(Component.class) );
- * }
- *
- * public void process() {
- * getLogger().info("Process started");
- * // ...
- * }
- * }
- *}
- *
- * The {@code getLogger()} method calls {@code logger.orElseSet()} on the stable value to
- * retrieve its contents. If the stable value is unset, then {@code orElseSet()}
- * evaluates the given supplier, and sets the contents to the result; the result is then
- * returned to the client. In other words, {@code orElseSet()} guarantees that a
- * stable value's contents is set before it returns.
- *
- * Furthermore, {@code orElseSet()} guarantees that out of one or more suppliers provided,
- * only at most one is ever evaluated, and that one is only ever evaluated once,
- * even when {@code logger.orElseSet()} is invoked concurrently. This property is crucial
- * as evaluation of the supplier may have side effects, for example, the call above to
- * {@code Logger.create()} may result in storage resources being prepared.
- *
- *
Stable Functions
- * Stable values provide the foundation for higher-level functional abstractions. A
- * stable supplier is a supplier that computes a value and then caches it into
- * a backing stable value storage for subsequent use. A stable supplier is created via the
- * {@linkplain StableValue#supplier(Supplier) StableValue.supplier()} factory, by
- * providing an underlying {@linkplain Supplier} which is invoked when the stable supplier
- * is first accessed:
- *
- * {@snippet lang = java:
- * public class Component {
- *
- * private final Supplier<Logger> logger =
- * // @link substring="supplier" target="#supplier(Supplier)" :
- * StableValue.supplier( () -> Logger.getLogger(Component.class) );
- *
- * public void process() {
- * logger.get().info("Process started");
- * // ...
- * }
- * }
- *}
- * A stable supplier encapsulates access to its backing stable value storage. This means
- * that code inside {@code Component} can obtain the logger object directly from the
- * stable supplier, without having to go through an accessor method like {@code getLogger()}.
- *
- * A stable int function is a function that takes an {@code int} parameter and
- * uses it to compute a result that is then cached by the backing stable value storage
- * for that parameter value. A stable {@link IntFunction} is created via the
- * {@linkplain StableValue#intFunction(int, IntFunction) StableValue.intFunction()}
- * factory. Upon creation, the input range (i.e. {@code [0, size)}) is specified together
- * with an underlying {@linkplain IntFunction} which is invoked at most once per input
- * value. In effect, the stable int function will act like a cache for the underlying
- * {@linkplain IntFunction}:
- *
- * {@snippet lang = java:
- * final class PowerOf2Util {
- *
- * private PowerOf2Util() {}
- *
- * private static final int SIZE = 6;
- * private static final IntFunction<Integer> UNDERLYING_POWER_OF_TWO =
- * v -> 1 << v;
- *
- * private static final IntFunction<Integer> POWER_OF_TWO =
- * // @link substring="intFunction" target="#intFunction(int,IntFunction)" :
- * StableValue.intFunction(SIZE, UNDERLYING_POWER_OF_TWO);
- *
- * public static int powerOfTwo(int a) {
- * return POWER_OF_TWO.apply(a);
- * }
- * }
- *
- * int result = PowerOf2Util.powerOfTwo(4); // May eventually constant fold to 16 at runtime
- *
- *}
- * The {@code PowerOf2Util.powerOfTwo()} function is a
|---|