Mirror of https://github.com/openjdk/jdk.git (synced 2026-02-21 07:45:11 +00:00)

Commit 4e3c930673: Merge branch 'master' into 8345954
.github/workflows/build-alpine-linux.yml (vendored)
@@ -97,7 +97,7 @@ jobs:
  --with-zlib=system
  --with-jmod-compress=zip-1
  --with-external-symbols-in-bundles=none
- --with-debug-info-level=1
+ --with-native-debug-symbols-level=1
  ${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
  echo "Dumping config.log:" &&
  cat config.log &&
.github/workflows/build-cross-compile.yml (vendored)
@@ -180,7 +180,7 @@ jobs:
  --with-sysroot=sysroot
  --with-jmod-compress=zip-1
  --with-external-symbols-in-bundles=none
- --with-debug-info-level=1
+ --with-native-debug-symbols-level=1
  CC=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-gcc-${{ inputs.gcc-major-version }}
  CXX=${{ matrix.gnu-arch }}-linux-gnu${{ matrix.gnu-abi}}-g++-${{ inputs.gcc-major-version }}
  ${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
.github/workflows/build-linux.yml (vendored)
@@ -144,7 +144,7 @@ jobs:
  --with-zlib=system
  --with-jmod-compress=zip-1
  --with-external-symbols-in-bundles=none
- --with-debug-info-level=1
+ --with-native-debug-symbols-level=1
  ${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
  echo "Dumping config.log:" &&
  cat config.log &&
.github/workflows/build-macos.yml (vendored)
@@ -111,7 +111,7 @@ jobs:
  --with-zlib=system
  --with-jmod-compress=zip-1
  --with-external-symbols-in-bundles=none
- --with-debug-info-level=1
+ --with-native-debug-symbols-level=1
  ${{ inputs.extra-conf-options }} ${{ inputs.configure-arguments }} || (
  echo "Dumping config.log:" &&
  cat config.log &&
@@ -69,20 +69,20 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
  # Debug prefix mapping if supported by compiler
  DEBUG_PREFIX_CFLAGS=

- UTIL_ARG_WITH(NAME: debug-info-level, TYPE: string,
+ UTIL_ARG_WITH(NAME: native-debug-symbols-level, TYPE: string,
  DEFAULT: "",
- RESULT: DEBUG_INFO_LEVEL,
- DESC: [Sets the debug info level, when debug info generation is enabled (GCC and Clang only)],
- DEFAULT_DESC: [default])
- AC_SUBST(DEBUG_INFO_LEVEL)
+ RESULT: DEBUG_SYMBOLS_LEVEL,
+ DESC: [set the native debug symbol level (GCC and Clang only)],
+ DEFAULT_DESC: [toolchain default])
+ AC_SUBST(DEBUG_SYMBOLS_LEVEL)

  if test "x${TOOLCHAIN_TYPE}" = xgcc || \
  test "x${TOOLCHAIN_TYPE}" = xclang; then
- DEBUG_INFO_LEVEL_FLAGS="-g"
- if test "x${DEBUG_INFO_LEVEL}" != "x"; then
- DEBUG_INFO_LEVEL_FLAGS="-g${DEBUG_INFO_LEVEL}"
- FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_INFO_LEVEL_FLAGS}],
- IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_INFO_LEVEL} is not supported"))
+ DEBUG_SYMBOLS_LEVEL_FLAGS="-g"
+ if test "x${DEBUG_SYMBOLS_LEVEL}" != "x"; then
+ DEBUG_SYMBOLS_LEVEL_FLAGS="-g${DEBUG_SYMBOLS_LEVEL}"
+ FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${DEBUG_SYMBOLS_LEVEL_FLAGS}],
+ IF_FALSE: AC_MSG_ERROR("Debug info level ${DEBUG_SYMBOLS_LEVEL} is not supported"))
  fi
  fi

@@ -111,8 +111,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
  fi

  # Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_INFO_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_INFO_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="-gdwarf-4 ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
  elif test "x$TOOLCHAIN_TYPE" = xclang; then
  if test "x$ALLOW_ABSOLUTE_PATHS_IN_OUTPUT" = "xfalse"; then
  # Check if compiler supports -fdebug-prefix-map. If so, use that to make

@@ -132,8 +132,8 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
  IF_FALSE: [GDWARF_FLAGS=""])

  # Debug info level should follow the debug format to be effective.
- CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_INFO_LEVEL_FLAGS}"
- ASFLAGS_DEBUG_SYMBOLS="${DEBUG_INFO_LEVEL_FLAGS}"
+ CFLAGS_DEBUG_SYMBOLS="${GDWARF_FLAGS} ${DEBUG_SYMBOLS_LEVEL_FLAGS}"
+ ASFLAGS_DEBUG_SYMBOLS="${DEBUG_SYMBOLS_LEVEL_FLAGS}"
  elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
  CFLAGS_DEBUG_SYMBOLS="-Z7"
  fi
@@ -286,7 +286,7 @@ public class ClassGenerator {
  diagnosticFlags.isEmpty() ?
  StubKind.DIAGNOSTIC_FLAGS_EMPTY.format() :
  StubKind.DIAGNOSTIC_FLAGS_NON_EMPTY.format(diagnosticFlags),
- StubKind.LINT_CATEGORY.format("\"" + lintCategory + "\""),
+ StubKind.LINT_CATEGORY.format(toLintFieldName(lintCategory)),
  "\"" + keyParts[0] + "\"",
  "\"" + Stream.of(keyParts).skip(2).collect(Collectors.joining(".")) + "\"",
  javadoc);

@@ -314,7 +314,7 @@ public class ClassGenerator {
  diagnosticFlags.isEmpty() ?
  StubKind.DIAGNOSTIC_FLAGS_EMPTY.format() :
  StubKind.DIAGNOSTIC_FLAGS_NON_EMPTY.format(diagnosticFlags),
- StubKind.LINT_CATEGORY.format("\"" + lintCategory + "\""),
+ StubKind.LINT_CATEGORY.format(toLintFieldName(lintCategory)),
  "\"" + keyParts[0] + "\"",
  "\"" + Stream.of(keyParts).skip(2).collect(Collectors.joining(".")) + "\"",
  argNames.stream().collect(Collectors.joining(", ")));

@@ -329,6 +329,11 @@ public class ClassGenerator {
  }
  }

+ String toLintFieldName(String lintCategory) {
+ return lintCategory.toUpperCase()
+ .replaceAll("-", "_");
+ }

  /**
  * Form the name of a factory method/field given a resource key.
  */
@@ -87,7 +87,7 @@ suppress.warnings=\
  @SuppressWarnings("rawtypes")\n

  lint.category=\
- LintCategory.get({0}).get()
+ LintCategory.{0}

  diagnostic.flags.empty=\
  EnumSet.noneOf(DiagnosticFlag.class)
@@ -167,11 +167,6 @@ void VM_Version::common_initialize() {
  (unaligned_scalar.value() == MISALIGNED_SCALAR_FAST));
  }

- if (FLAG_IS_DEFAULT(AlignVector)) {
- FLAG_SET_DEFAULT(AlignVector,
- unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
- }

  #ifdef __riscv_ztso
  // Hotspot is compiled with TSO support, it will only run on hardware which
  // supports Ztso

@@ -242,6 +237,11 @@ void VM_Version::c2_initialize() {
  }
  }

+ if (FLAG_IS_DEFAULT(AlignVector)) {
+ FLAG_SET_DEFAULT(AlignVector,
+ unaligned_vector.value() != MISALIGNED_VECTOR_FAST);
+ }

  // NOTE: Make sure codes dependent on UseRVV are put after MaxVectorSize initialize,
  // as there are extra checks inside it which could disable UseRVV
  // in some situations.
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -6086,7 +6086,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
  vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit);

  subptr(count, 16 << shift);
- jccb(Assembler::less, L_check_fill_32_bytes);
+ jcc(Assembler::less, L_check_fill_32_bytes);
  align(16);

  BIND(L_fill_64_bytes_loop_avx3);
@@ -66,9 +66,6 @@
  #endif

  // open(2) flags
- #ifndef O_CLOEXEC
- #define O_CLOEXEC 02000000
- #endif
  #ifndef O_TMPFILE
  #define O_TMPFILE (020000000 | O_DIRECTORY)
  #endif

@@ -4878,31 +4878,8 @@ int os::open(const char *path, int oflag, int mode) {
  // All file descriptors that are opened in the Java process and not
  // specifically destined for a subprocess should have the close-on-exec
  // flag set. If we don't set it, then careless 3rd party native code
- // might fork and exec without closing all appropriate file descriptors,
- // and this in turn might:
- //
- // - cause end-of-file to fail to be detected on some file
- // descriptors, resulting in mysterious hangs, or
- //
- // - might cause an fopen in the subprocess to fail on a system
- // suffering from bug 1085341.
- //
- // (Yes, the default setting of the close-on-exec flag is a Unix
- // design flaw)
- //
- // See:
- // 1085341: 32-bit stdio routines should support file descriptors >255
- // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
- // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
- //
- // Modern Linux kernels (after 2.6.23 2007) support O_CLOEXEC with open().
- // O_CLOEXEC is preferable to using FD_CLOEXEC on an open file descriptor
- // because it saves a system call and removes a small window where the flag
- // is unset. On ancient Linux kernels the O_CLOEXEC flag will be ignored
- // and we fall back to using FD_CLOEXEC (see below).
- #ifdef O_CLOEXEC
+ // might fork and exec without closing all appropriate file descriptors.
  oflag |= O_CLOEXEC;
- #endif

  int fd = ::open(path, oflag, mode);
  if (fd == -1) return -1;

@@ -4925,21 +4902,6 @@ int os::open(const char *path, int oflag, int mode) {
  }
  }

- #ifdef FD_CLOEXEC
- // Validate that the use of the O_CLOEXEC flag on open above worked.
- // With recent kernels, we will perform this check exactly once.
- static sig_atomic_t O_CLOEXEC_is_known_to_work = 0;
- if (!O_CLOEXEC_is_known_to_work) {
- int flags = ::fcntl(fd, F_GETFD);
- if (flags != -1) {
- if ((flags & FD_CLOEXEC) != 0)
- O_CLOEXEC_is_known_to_work = 1;
- else
- ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
- }
- }
- #endif

  return fd;
  }
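For context, here is a minimal standalone sketch of the open-with-close-on-exec pattern this hunk simplifies. It is generic POSIX C/C++, not taken from the JDK sources; the fcntl() branch mirrors the fallback that the change removes, under the assumption that modern kernels honor O_CLOEXEC directly.

#include <fcntl.h>

// Open a file for reading and ensure the descriptor is not inherited across
// fork()/exec(). O_CLOEXEC does this atomically at open() time; the fcntl()
// path below is the older two-step fallback for kernels that ignore it.
int open_cloexec(const char* path) {
  int fd = ::open(path, O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    return -1;
  }
#if defined(FD_CLOEXEC)
  // Fallback: set the flag explicitly if open() did not.
  int flags = ::fcntl(fd, F_GETFD);
  if (flags != -1 && (flags & FD_CLOEXEC) == 0) {
    ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif
  return fd;
}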
@@ -458,12 +458,10 @@ char* os::map_memory_to_file(char* base, size_t size, int fd) {
  warning("Failed mmap to file. (%s)", os::strerror(errno));
  return nullptr;
  }
- if (base != nullptr && addr != base) {
- if (!os::release_memory(addr, size)) {
- warning("Could not release memory on unsuccessful file mapping");
- }
- return nullptr;
- }

+ // The requested address should be the same as the returned address when using MAP_FIXED
+ // as per POSIX.
+ assert(base == nullptr || addr == base, "base should equal addr when using MAP_FIXED");
  return addr;
  }
@@ -1,6 +1,6 @@
  /*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -946,7 +946,7 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
  if (result == -1 ) break;
  if (!os::write(fd, &zero_int, 1)) {
  if (errno == ENOSPC) {
- warning("Insufficient space for shared memory file:\n %s\nTry using the -Djava.io.tmpdir= option to select an alternate temp location.\n", filename);
+ warning("Insufficient space for shared memory file: %s/%s\n", dirname, filename);
  }
  result = OS_ERR;
  break;
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -951,6 +951,32 @@ struct enum_sigcode_desc_t {
  const char* s_desc;
  };

+ #if defined(LINUX)
+ // Additional kernel si_code definitions that are only exported by
+ // more recent glibc distributions, so we have to hard-code the values.
+ #ifndef BUS_MCEERR_AR // glibc 2.17
+ #define BUS_MCEERR_AR 4
+ #define BUS_MCEERR_AO 5
+ #endif
+
+ #ifndef SEGV_PKUERR // glibc 2.27
+ #define SEGV_PKUERR 4
+ #endif
+
+ #ifndef SYS_SECCOMP // glibc 2.28
+ #define SYS_SECCOMP 1
+ #endif
+
+ #ifndef TRAP_BRANCH // glibc 2.30
+ #define TRAP_BRANCH 3
+ #endif
+
+ #ifndef TRAP_HWBKPT // not glibc version specific - gdb related
+ #define TRAP_HWBKPT 4
+ #endif
+
+ #endif // LINUX
+
  static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {

  const struct {

@@ -976,6 +1002,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
  { SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
  #if defined(LINUX)
  { SIGSEGV, SEGV_BNDERR, "SEGV_BNDERR", "Failed address bound checks." },
+ { SIGSEGV, SEGV_PKUERR, "SEGV_PKUERR", "Protection key checking failure." },
  #endif
  #if defined(AIX)
  // no explanation found what keyerr would be

@@ -984,8 +1011,18 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
  { SIGBUS, BUS_ADRALN, "BUS_ADRALN", "Invalid address alignment." },
  { SIGBUS, BUS_ADRERR, "BUS_ADRERR", "Nonexistent physical address." },
  { SIGBUS, BUS_OBJERR, "BUS_OBJERR", "Object-specific hardware error." },
+ #if defined(LINUX)
+ { SIGBUS, BUS_MCEERR_AR,"BUS_MCEERR_AR","Hardware memory error consumed on a machine check: action required." },
+ { SIGBUS, BUS_MCEERR_AO,"BUS_MCEERR_AO","Hardware memory error detected in process but not consumed: action optional." },
+
+ { SIGSYS, SYS_SECCOMP, "SYS_SECCOMP", "Secure computing (seccomp) filter failure." },
+ #endif
  { SIGTRAP, TRAP_BRKPT, "TRAP_BRKPT", "Process breakpoint." },
  { SIGTRAP, TRAP_TRACE, "TRAP_TRACE", "Process trace trap." },
+ #if defined(LINUX)
+ { SIGTRAP, TRAP_BRANCH, "TRAP_BRANCH", "Process taken branch trap." },
+ { SIGTRAP, TRAP_HWBKPT, "TRAP_HWBKPT", "Hardware breakpoint/watchpoint." },
+ #endif
  { SIGCHLD, CLD_EXITED, "CLD_EXITED", "Child has exited." },
  { SIGCHLD, CLD_KILLED, "CLD_KILLED", "Child has terminated abnormally and did not create a core file." },
  { SIGCHLD, CLD_DUMPED, "CLD_DUMPED", "Child has terminated abnormally and created a core file." },

@@ -993,6 +1030,7 @@ static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t
  { SIGCHLD, CLD_STOPPED, "CLD_STOPPED", "Child has stopped." },
  { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
+ #ifdef SIGPOLL
  { SIGPOLL, POLL_IN, "POLL_IN", "Data input available." },
  { SIGPOLL, POLL_OUT, "POLL_OUT", "Output buffers available." },
  { SIGPOLL, POLL_MSG, "POLL_MSG", "Input message available." },
  { SIGPOLL, POLL_ERR, "POLL_ERR", "I/O error." },
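As an aside, the entries added above extend a table-driven lookup over {signal, si_code} pairs. A small self-contained sketch of that idiom on a POSIX system (illustrative only, not JDK code; the entries shown are just examples):

#include <signal.h>

// Minimal {signal, si_code} -> description table, searched linearly,
// in the same spirit as get_signal_code_description().
struct SigCodeDesc {
  int sig;
  int code;
  const char* name;
  const char* desc;
};

static const SigCodeDesc kTable[] = {
  { SIGSEGV, SEGV_MAPERR, "SEGV_MAPERR", "Address not mapped to object." },
  { SIGSEGV, SEGV_ACCERR, "SEGV_ACCERR", "Invalid permissions for mapped object." },
  { SIGBUS,  BUS_ADRALN,  "BUS_ADRALN",  "Invalid address alignment." },
};

const char* describe(int sig, int code) {
  for (const SigCodeDesc& e : kTable) {
    if (e.sig == sig && e.code == code) {
      return e.desc;
    }
  }
  return "unknown si_code";
}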
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,8 +26,7 @@
  #ifndef OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP
  #define OS_CPU_AIX_PPC_PREFETCH_AIX_PPC_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read(const void *loc, intx interval) {
  #if !defined(USE_XLC_BUILTINS)
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

@@ -27,8 +27,7 @@
  #ifndef OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP
  #define OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  if (interval >= 0)
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,8 +25,7 @@
  #ifndef OS_CPU_BSD_X86_PREFETCH_BSD_X86_INLINE_HPP
  #define OS_CPU_BSD_X86_PREFETCH_BSD_X86_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,7 +26,7 @@
  #ifndef OS_CPU_BSD_ZERO_PREFETCH_BSD_ZERO_INLINE_HPP
  #define OS_CPU_BSD_ZERO_PREFETCH_BSD_ZERO_INLINE_HPP

  #include "runtime/prefetch.hpp"
  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read(const void* loc, intx interval) {
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,8 +26,7 @@
  #ifndef OS_CPU_LINUX_AARCH64_PREFETCH_LINUX_AARCH64_INLINE_HPP
  #define OS_CPU_LINUX_AARCH64_PREFETCH_LINUX_AARCH64_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  if (interval >= 0)
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,7 +25,7 @@
  #ifndef OS_CPU_LINUX_ARM_PREFETCH_LINUX_ARM_INLINE_HPP
  #define OS_CPU_LINUX_ARM_PREFETCH_LINUX_ARM_INLINE_HPP

  #include "runtime/prefetch.hpp"
  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  #if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_5TE__)
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,8 +26,7 @@
  #ifndef OS_CPU_LINUX_PPC_PREFETCH_LINUX_PPC_INLINE_HPP
  #define OS_CPU_LINUX_PPC_PREFETCH_LINUX_PPC_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read(const void *loc, intx interval) {
  __asm__ __volatile__ (
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,7 +26,7 @@
  #ifndef OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP
  #define OS_CPU_LINUX_RISCV_VM_PREFETCH_LINUX_RISCV_INLINE_HPP

  #include "runtime/prefetch.hpp"
  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  if (interval >= 0 && UseZicbop) {
@@ -30,7 +30,7 @@
  #include "code/nativeInst.hpp"
  #include "code/vtableStubs.hpp"
  #include "compiler/disassembler.hpp"
- #include "cppstdlib/cstdlib.h"
+ #include "cppstdlib/cstdlib.hpp"
  #include "interpreter/interpreter.hpp"
  #include "jvm.h"
  #include "memory/allocation.inline.hpp"
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,7 +26,7 @@
  #ifndef OS_CPU_LINUX_S390_PREFETCH_LINUX_S390_INLINE_HPP
  #define OS_CPU_LINUX_S390_PREFETCH_LINUX_S390_INLINE_HPP

  #include "runtime/prefetch.hpp"
  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read(const void* loc, intx interval) {
  // No prefetch instructions on z/Architecture -> implement trivially.
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,8 +25,7 @@
  #ifndef OS_CPU_LINUX_X86_PREFETCH_LINUX_X86_INLINE_HPP
  #define OS_CPU_LINUX_X86_PREFETCH_LINUX_X86_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *

@@ -26,7 +26,7 @@
  #ifndef OS_CPU_LINUX_ZERO_PREFETCH_LINUX_ZERO_INLINE_HPP
  #define OS_CPU_LINUX_ZERO_PREFETCH_LINUX_ZERO_INLINE_HPP

  #include "runtime/prefetch.hpp"
  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read(const void* loc, intx interval) {
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020, Microsoft Corporation. All rights reserved.
+ * Copyright (c) 2020, 2026, Microsoft Corporation. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,8 +25,7 @@
  #ifndef OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP
  #define OS_CPU_WINDOWS_AARCH64_PREFETCH_WINDOWS_AARCH64_INLINE_HPP

  #include "runtime/prefetch.hpp"

  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -25,7 +25,7 @@
  #ifndef OS_CPU_WINDOWS_X86_PREFETCH_WINDOWS_X86_INLINE_HPP
  #define OS_CPU_WINDOWS_X86_PREFETCH_WINDOWS_X86_INLINE_HPP

  #include "runtime/prefetch.hpp"
  // Included in runtime/prefetch.inline.hpp

  inline void Prefetch::read (const void *loc, intx interval) {}
  inline void Prefetch::write(void *loc, intx interval) {}
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -116,6 +116,10 @@ bool AOTConstantPoolResolver::is_class_resolution_deterministic(InstanceKlass* c
  return false;
  }
  } else if (resolved_class->is_objArray_klass()) {
+ if (CDSConfig::is_dumping_dynamic_archive()) {
+ // This is difficult to handle. See JDK-8374639
+ return false;
+ }
  Klass* elem = ObjArrayKlass::cast(resolved_class)->bottom_klass();
  if (elem->is_instance_klass()) {
  return is_class_resolution_deterministic(cp_holder, InstanceKlass::cast(elem));
@@ -696,7 +696,7 @@ template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* fiel
  // We use zero-based, 0-shift encoding, so the narrowOop is just the lower
  // 32 bits of request_referent
  intptr_t addr = cast_from_oop<intptr_t>(request_referent);
- *((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
+ *((narrowOop*)field_addr_in_buffer) = CompressedOops::narrow_oop_cast(addr);
  } else {
  store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -2159,7 +2159,6 @@ void AOTMetaspace::initialize_shared_spaces() {
  intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
  ReadClosure rc(&buffer, (intptr_t)SharedBaseAddress);
  DynamicArchive::serialize(&rc);
- DynamicArchive::setup_array_klasses();
  }

  LogStreamHandle(Info, aot) lsh;
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -984,8 +984,6 @@ void ArchiveBuilder::make_klasses_shareable() {

  #undef STATS_FORMAT
  #undef STATS_PARAMS
-
- DynamicArchive::make_array_klasses_shareable();
  }

  void ArchiveBuilder::make_training_data_shareable() {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -48,6 +48,7 @@
  #include "logging/log.hpp"
  #include "memory/metaspaceClosure.hpp"
  #include "memory/resourceArea.hpp"
  #include "oops/array.hpp"
  #include "oops/klass.inline.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/os.hpp"

@@ -95,7 +96,6 @@ public:
  void sort_methods(InstanceKlass* ik) const;
  void remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const;
  void write_archive(char* serialized_data, AOTClassLocationConfig* cl_config);
- void gather_array_klasses();

  public:
  // Do this before and after the archive dump to see if any corruption

@@ -132,7 +132,6 @@ public:

  init_header();
  gather_source_objs();
- gather_array_klasses();
  reserve_buffer();

  log_info(cds, dynamic)("Copying %d klasses and %d symbols",

@@ -159,7 +158,6 @@ public:
  ArchiveBuilder::OtherROAllocMark mark;
  SystemDictionaryShared::write_to_archive(false);
  cl_config = AOTClassLocationConfig::dumptime()->write_to_archive();
- DynamicArchive::dump_array_klasses();

  serialized_data = ro_region()->top();
  WriteClosure wc(ro_region());

@@ -175,8 +173,6 @@ public:

  write_archive(serialized_data, cl_config);
  release_header();
- DynamicArchive::post_dump();

  post_dump();

  verify_universe("After CDS dynamic dump");

@@ -185,30 +181,6 @@ public:
  virtual void iterate_roots(MetaspaceClosure* it) {
  AOTArtifactFinder::all_cached_classes_do(it);
  SystemDictionaryShared::dumptime_classes_do(it);
- iterate_primitive_array_klasses(it);
  }

- void iterate_primitive_array_klasses(MetaspaceClosure* it) {
- for (int i = T_BOOLEAN; i <= T_LONG; i++) {
- assert(is_java_primitive((BasicType)i), "sanity");
- Klass* k = Universe::typeArrayKlass((BasicType)i); // this give you "[I", etc
- assert(AOTMetaspace::in_aot_cache_static_region((void*)k),
- "one-dimensional primitive array should be in static archive");
- ArrayKlass* ak = ArrayKlass::cast(k);
- while (ak != nullptr && ak->in_aot_cache()) {
- Klass* next_k = ak->array_klass_or_null();
- if (next_k != nullptr) {
- ak = ArrayKlass::cast(next_k);
- } else {
- ak = nullptr;
- }
- }
- if (ak != nullptr) {
- assert(ak->dimension() > 1, "sanity");
- // this is the lowest dimension that's not in the static archive
- it->push(&ak);
- }
- }
- }
  }
  };

@@ -367,26 +339,6 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
  log_info(cds, dynamic)("%d klasses; %d symbols", klasses()->length(), symbols()->length());
  }

- void DynamicArchiveBuilder::gather_array_klasses() {
- for (int i = 0; i < klasses()->length(); i++) {
- if (klasses()->at(i)->is_objArray_klass()) {
- ObjArrayKlass* oak = ObjArrayKlass::cast(klasses()->at(i));
- Klass* elem = oak->element_klass();
- if (AOTMetaspace::in_aot_cache_static_region(elem)) {
- // Only capture the array klass whose element_klass is in the static archive.
- // During run time, setup (see DynamicArchive::setup_array_klasses()) is needed
- // so that the element_klass can find its array klasses from the dynamic archive.
- DynamicArchive::append_array_klass(oak);
- } else {
- // The element_klass and its array klasses are in the same archive.
- assert(!AOTMetaspace::in_aot_cache_static_region(oak),
- "we should not gather klasses that are already in the static archive");
- }
- }
- }
- log_debug(aot)("Total array klasses gathered for dynamic archive: %d", DynamicArchive::num_array_klasses());
- }

  class VM_PopulateDynamicDumpSharedSpace: public VM_Heap_Sync_Operation {
  DynamicArchiveBuilder _builder;
  public:

@@ -403,76 +355,9 @@ public:
  }
  };

- // _array_klasses and _dynamic_archive_array_klasses only hold the array klasses
- // which have element klass in the static archive.
- GrowableArray<ObjArrayKlass*>* DynamicArchive::_array_klasses = nullptr;
- Array<ObjArrayKlass*>* DynamicArchive::_dynamic_archive_array_klasses = nullptr;

  void DynamicArchive::serialize(SerializeClosure* soc) {
  SymbolTable::serialize_shared_table_header(soc, false);
  SystemDictionaryShared::serialize_dictionary_headers(soc, false);
- soc->do_ptr(&_dynamic_archive_array_klasses);
  }

- void DynamicArchive::append_array_klass(ObjArrayKlass* ak) {
- if (_array_klasses == nullptr) {
- _array_klasses = new (mtClassShared) GrowableArray<ObjArrayKlass*>(50, mtClassShared);
- }
- _array_klasses->append(ak);
- }

- void DynamicArchive::dump_array_klasses() {
- assert(CDSConfig::is_dumping_dynamic_archive(), "sanity");
- if (_array_klasses != nullptr) {
- ArchiveBuilder* builder = ArchiveBuilder::current();
- int num_array_klasses = _array_klasses->length();
- _dynamic_archive_array_klasses =
- ArchiveBuilder::new_ro_array<ObjArrayKlass*>(num_array_klasses);
- for (int i = 0; i < num_array_klasses; i++) {
- builder->write_pointer_in_buffer(_dynamic_archive_array_klasses->adr_at(i), _array_klasses->at(i));
- }
- }
- }

- void DynamicArchive::setup_array_klasses() {
- if (_dynamic_archive_array_klasses != nullptr) {
- for (int i = 0; i < _dynamic_archive_array_klasses->length(); i++) {
- ObjArrayKlass* oak = _dynamic_archive_array_klasses->at(i);
- Klass* elm = oak->element_klass();
- assert(AOTMetaspace::in_aot_cache_static_region((void*)elm), "must be");
-
- if (elm->is_instance_klass()) {
- assert(InstanceKlass::cast(elm)->array_klasses() == nullptr, "must be");
- InstanceKlass::cast(elm)->set_array_klasses(oak);
- } else {
- assert(elm->is_array_klass(), "sanity");
- assert(ArrayKlass::cast(elm)->higher_dimension() == nullptr, "must be");
- ArrayKlass::cast(elm)->set_higher_dimension(oak);
- }
- }
- log_debug(aot)("Total array klasses read from dynamic archive: %d", _dynamic_archive_array_klasses->length());
- }
- }

- void DynamicArchive::make_array_klasses_shareable() {
- if (_array_klasses != nullptr) {
- int num_array_klasses = _array_klasses->length();
- for (int i = 0; i < num_array_klasses; i++) {
- ObjArrayKlass* k = ArchiveBuilder::current()->get_buffered_addr(_array_klasses->at(i));
- k->remove_unshareable_info();
- }
- }
- }

- void DynamicArchive::post_dump() {
- if (_array_klasses != nullptr) {
- delete _array_klasses;
- _array_klasses = nullptr;
- }
- }

- int DynamicArchive::num_array_klasses() {
- return _array_klasses != nullptr ? _array_klasses->length() : 0;
- }

  void DynamicArchive::dump_impl(bool jcmd_request, const char* archive_name, TRAPS) {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -26,13 +26,8 @@
  #define SHARE_CDS_DYNAMICARCHIVE_HPP

  #include "cds/filemap.hpp"
  #include "classfile/compactHashtable.hpp"
  #include "memory/allStatic.hpp"
  #include "memory/memRegion.hpp"
  #include "oops/array.hpp"
  #include "oops/oop.hpp"
  #include "utilities/exceptions.hpp"
  #include "utilities/growableArray.hpp"
  #include "utilities/macros.hpp"

  #if INCLUDE_CDS

@@ -59,22 +54,13 @@ public:
  };

  class DynamicArchive : AllStatic {
- private:
- static GrowableArray<ObjArrayKlass*>* _array_klasses;
- static Array<ObjArrayKlass*>* _dynamic_archive_array_klasses;
  public:
  static void dump_for_jcmd(const char* archive_name, TRAPS);
  static void dump_at_exit(JavaThread* current);
  static void dump_impl(bool jcmd_request, const char* archive_name, TRAPS);
  static bool is_mapped() { return FileMapInfo::dynamic_info() != nullptr; }
  static bool validate(FileMapInfo* dynamic_info);
- static void dump_array_klasses();
- static void setup_array_klasses();
- static void append_array_klass(ObjArrayKlass* oak);
  static void serialize(SerializeClosure* soc);
- static void make_array_klasses_shareable();
- static void post_dump();
- static int num_array_klasses();
  };
  #endif // INCLUDE_CDS
  #endif // SHARE_CDS_DYNAMICARCHIVE_HPP
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -4297,10 +4297,6 @@ int jdk_internal_foreign_abi_NativeEntryPoint::_downcall_stub_address_offset;
  macro(_method_type_offset, k, "methodType", java_lang_invoke_MethodType_signature, false); \
  macro(_downcall_stub_address_offset, k, "downcallStubAddress", long_signature, false);

- bool jdk_internal_foreign_abi_NativeEntryPoint::is_instance(oop obj) {
- return obj != nullptr && is_subclass(obj->klass());
- }

  void jdk_internal_foreign_abi_NativeEntryPoint::compute_offsets() {
  InstanceKlass* k = vmClasses::NativeEntryPoint_klass();
  NEP_FIELDS_DO(FIELD_COMPUTE_OFFSET);

@@ -4337,10 +4333,6 @@ int jdk_internal_foreign_abi_ABIDescriptor::_scratch2_offset;
  macro(_scratch1_offset, k, "scratch1", jdk_internal_foreign_abi_VMStorage_signature, false); \
  macro(_scratch2_offset, k, "scratch2", jdk_internal_foreign_abi_VMStorage_signature, false);

- bool jdk_internal_foreign_abi_ABIDescriptor::is_instance(oop obj) {
- return obj != nullptr && is_subclass(obj->klass());
- }

  void jdk_internal_foreign_abi_ABIDescriptor::compute_offsets() {
  InstanceKlass* k = vmClasses::ABIDescriptor_klass();
  ABIDescriptor_FIELDS_DO(FIELD_COMPUTE_OFFSET);
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -1179,13 +1179,6 @@ class jdk_internal_foreign_abi_NativeEntryPoint: AllStatic {
  static oop method_type(oop entry);
  static jlong downcall_stub_address(oop entry);

- // Testers
- static bool is_subclass(Klass* klass) {
- return vmClasses::NativeEntryPoint_klass() != nullptr &&
- klass->is_subclass_of(vmClasses::NativeEntryPoint_klass());
- }
- static bool is_instance(oop obj);

  // Accessors for code generation:
  static int method_type_offset_in_bytes() { return _method_type_offset; }
  static int downcall_stub_address_offset_in_bytes() { return _downcall_stub_address_offset; }

@@ -1216,13 +1209,6 @@ class jdk_internal_foreign_abi_ABIDescriptor: AllStatic {
  static jint shadowSpace(oop entry);
  static oop scratch1(oop entry);
  static oop scratch2(oop entry);

- // Testers
- static bool is_subclass(Klass* klass) {
- return vmClasses::ABIDescriptor_klass() != nullptr &&
- klass->is_subclass_of(vmClasses::ABIDescriptor_klass());
- }
- static bool is_instance(oop obj);
  };

  class jdk_internal_foreign_abi_VMStorage: AllStatic {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -1371,6 +1371,7 @@ void AOTCodeAddressTable::init_extrs() {
  SET_ADDRESS(_extrs, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
  #endif
  #if INCLUDE_ZGC
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_oop_field_preloaded_addr());
  SET_ADDRESS(_extrs, ZBarrierSetRuntime::load_barrier_on_phantom_oop_field_preloaded_addr());
  #if defined(AMD64)
  SET_ADDRESS(_extrs, &ZPointerLoadShift);
|
||||
|
||||
// Enable monitoring
|
||||
_monitoring_support = new EpsilonMonitoringSupport(this);
|
||||
_last_counter_update = 0;
|
||||
_last_heap_print = 0;
|
||||
|
||||
// Install barrier set
|
||||
BarrierSet::set_barrier_set(new EpsilonBarrierSet());
|
||||
@ -77,6 +75,7 @@ jint EpsilonHeap::initialize() {
|
||||
void EpsilonHeap::initialize_serviceability() {
|
||||
_pool = new EpsilonMemoryPool(this);
|
||||
_memory_manager.add_pool(_pool);
|
||||
_monitoring_support->mark_ready();
|
||||
}
|
||||
|
||||
GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
|
||||
@ -101,7 +100,7 @@ EpsilonHeap* EpsilonHeap::heap() {
|
||||
return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
|
||||
}
|
||||
|
||||
HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
|
||||
HeapWord* EpsilonHeap::allocate_work(size_t size) {
|
||||
assert(is_object_aligned(size), "Allocation size should be aligned: %zu", size);
|
||||
|
||||
HeapWord* res = nullptr;
|
||||
@ -151,19 +150,23 @@ HeapWord* EpsilonHeap::allocate_work(size_t size, bool verbose) {
|
||||
|
||||
size_t used = _space->used();
|
||||
|
||||
// Allocation successful, update counters
|
||||
if (verbose) {
|
||||
size_t last = _last_counter_update;
|
||||
if ((used - last >= _step_counter_update) && AtomicAccess::cmpxchg(&_last_counter_update, last, used) == last) {
|
||||
// Allocation successful, update counters and print status.
|
||||
// At this point, some diagnostic subsystems might not yet be initialized.
|
||||
// We pretend the printout happened either way. This keeps allocation path
|
||||
// from obsessively checking the subsystems' status on every allocation.
|
||||
size_t last_counter = _last_counter_update.load_relaxed();
|
||||
if ((used - last_counter >= _step_counter_update) &&
|
||||
_last_counter_update.compare_set(last_counter, used)) {
|
||||
if (_monitoring_support->is_ready()) {
|
||||
_monitoring_support->update_counters();
|
||||
}
|
||||
}
|
||||
|
||||
// ...and print the occupancy line, if needed
|
||||
if (verbose) {
|
||||
size_t last = _last_heap_print;
|
||||
if ((used - last >= _step_heap_print) && AtomicAccess::cmpxchg(&_last_heap_print, last, used) == last) {
|
||||
print_heap_info(used);
|
||||
size_t last_heap = _last_heap_print.load_relaxed();
|
||||
if ((used - last_heap >= _step_heap_print) &&
|
||||
_last_heap_print.compare_set(last_heap, used)) {
|
||||
print_heap_info(used);
|
||||
if (Metaspace::initialized()) {
|
||||
print_metaspace_info();
|
||||
}
|
||||
}
|
||||
@ -265,8 +268,7 @@ HeapWord* EpsilonHeap::mem_allocate(size_t size) {
|
||||
}
|
||||
|
||||
HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
|
||||
// Cannot use verbose=true because Metaspace is not initialized
|
||||
return allocate_work(size, /* verbose = */false);
|
||||
return allocate_work(size);
|
||||
}
|
||||
|
||||
void EpsilonHeap::collect(GCCause::Cause cause) {
|
||||
|
||||
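The throttling above relies on a relaxed load followed by a compare-and-set, so that only the thread that wins the race performs each periodic update. A standalone sketch of that idiom using std::atomic (an illustration of the pattern only; HotSpot's Atomic<> template with load_relaxed()/compare_set() is the in-tree equivalent, and the class and names below are hypothetical):

#include <atomic>
#include <cstddef>

// Publish a status update at most once per 'step' bytes of allocation.
// Only the thread that wins the compare_exchange performs the update,
// keeping the hot allocation path cheap and updates non-duplicated.
class ThrottledReporter {
  std::atomic<size_t> _last_reported{0};
  const size_t _step;
 public:
  explicit ThrottledReporter(size_t step) : _step(step) {}

  template <typename Fn>
  void maybe_report(size_t used, Fn report) {
    size_t last = _last_reported.load(std::memory_order_relaxed);
    if (used - last >= _step &&
        _last_reported.compare_exchange_strong(last, used)) {
      report(used);
    }
  }
};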
@@ -31,6 +31,7 @@
  #include "gc/shared/collectedHeap.hpp"
  #include "gc/shared/space.hpp"
  #include "memory/virtualspace.hpp"
+ #include "runtime/atomic.hpp"
  #include "services/memoryManager.hpp"

  class EpsilonHeap : public CollectedHeap {

@@ -45,8 +46,8 @@ private:
  size_t _step_counter_update;
  size_t _step_heap_print;
  int64_t _decay_time_ns;
- volatile size_t _last_counter_update;
- volatile size_t _last_heap_print;
+ Atomic<size_t> _last_counter_update;
+ Atomic<size_t> _last_heap_print;

  void print_tracing_info() const override;
  void stop() override {};

@@ -83,7 +84,7 @@ public:
  bool requires_barriers(stackChunkOop obj) const override { return false; }

  // Allocation
- HeapWord* allocate_work(size_t size, bool verbose = true);
+ HeapWord* allocate_work(size_t size);
  HeapWord* mem_allocate(size_t size) override;
  HeapWord* allocate_new_tlab(size_t min_size,
  size_t requested_size,
@@ -99,6 +99,7 @@ EpsilonMonitoringSupport::EpsilonMonitoringSupport(EpsilonHeap* heap) {
  }

  void EpsilonMonitoringSupport::update_counters() {
+ assert(is_ready(), "Must be ready");
  MemoryService::track_memory_usage();

  if (UsePerfData) {

@@ -110,3 +111,11 @@ void EpsilonMonitoringSupport::update_counters() {
  MetaspaceCounters::update_performance_counters();
  }
  }
+
+ bool EpsilonMonitoringSupport::is_ready() {
+ return _ready.load_acquire();
+ }
+
+ void EpsilonMonitoringSupport::mark_ready() {
+ _ready.release_store(true);
+ }
@@ -26,6 +26,7 @@
  #define SHARE_GC_EPSILON_EPSILONMONITORINGSUPPORT_HPP

  #include "memory/allocation.hpp"
+ #include "runtime/atomic.hpp"

  class EpsilonGenerationCounters;
  class EpsilonSpaceCounters;

@@ -35,9 +36,12 @@ class EpsilonMonitoringSupport : public CHeapObj<mtGC> {
  private:
  EpsilonGenerationCounters* _heap_counters;
  EpsilonSpaceCounters* _space_counters;
+ Atomic<bool> _ready;

  public:
  EpsilonMonitoringSupport(EpsilonHeap* heap);
+ bool is_ready();
+ void mark_ready();
  void update_counters();
  };
@@ -36,6 +36,7 @@
  #include "gc/shared/fullGCForwarding.hpp"
  #include "gc/shared/gcArguments.hpp"
  #include "gc/shared/workerPolicy.hpp"
+ #include "runtime/flags/jvmFlagLimit.hpp"
  #include "runtime/globals.hpp"
  #include "runtime/globals_extension.hpp"
  #include "runtime/java.hpp"

@@ -190,7 +191,8 @@ void G1Arguments::initialize() {
  }
  FLAG_SET_DEFAULT(G1ConcRefinementThreads, 0);
  } else if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
- FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
+ const JVMTypedFlagLimit<uint>* conc_refinement_threads_limits = JVMFlagLimit::get_range_at(FLAG_MEMBER_ENUM(G1ConcRefinementThreads))->cast<uint>();
+ FLAG_SET_ERGO(G1ConcRefinementThreads, MIN2(ParallelGCThreads, conc_refinement_threads_limits->max()));
  }

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
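The replacement line above caps the ergonomically derived default at the maximum that the flag's declared range allows, instead of copying ParallelGCThreads verbatim. A generic sketch of that clamping idiom (illustrative only; the function and parameter names are hypothetical and not the HotSpot flag APIs):

#include <algorithm>
#include <cstdint>

// Derive a default thread count from another knob, but never exceed the
// upper bound that the target option's declared range permits.
uint32_t clamped_default(uint32_t derived_from, uint32_t range_max) {
  return std::min(derived_from, range_max);
}

// e.g. refinement_threads = clamped_default(parallel_gc_threads, limit_max);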
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -686,7 +686,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
- if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
+ // Only try that if we can actually perform a GC.
+ if (is_init_completed() && policy()->need_to_start_conc_mark("concurrent humongous allocation",
  word_size)) {
  try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -46,7 +46,7 @@
  #include "oops/access.inline.hpp"
  #include "oops/compressedOops.inline.hpp"
  #include "oops/oop.inline.hpp"
- #include "runtime/prefetch.hpp"
+ #include "runtime/prefetch.inline.hpp"
  #include "runtime/threads.hpp"
  #include "runtime/threadSMR.hpp"
  #include "utilities/bitMap.inline.hpp"
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -28,6 +28,7 @@
  #include "gc/serial/serialHeap.inline.hpp"
  #include "gc/shared/space.hpp"
  #include "memory/iterator.inline.hpp"
+ #include "runtime/prefetch.inline.hpp"
  #include "utilities/align.hpp"

  void CardTableRS::scan_old_to_young_refs(TenuredGeneration* tg, HeapWord* saved_top) {
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -34,7 +34,6 @@
  #include "memory/virtualspace.hpp"
  #include "runtime/mutex.hpp"
  #include "runtime/perfData.hpp"
- #include "runtime/prefetch.inline.hpp"

  // A Generation models a heap area for similarly-aged objects.
  // It will contain one ore more spaces holding the actual objects.
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -70,6 +70,7 @@
  #include "runtime/init.hpp"
  #include "runtime/java.hpp"
  #include "runtime/mutexLocker.hpp"
+ #include "runtime/prefetch.inline.hpp"
  #include "runtime/threads.hpp"
  #include "runtime/vmThread.hpp"
  #include "services/memoryManager.hpp"
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -33,6 +33,7 @@
  #include "memory/universe.hpp"
  #include "oops/access.inline.hpp"
  #include "oops/method.inline.hpp"
+ #include "runtime/atomic.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/javaThread.hpp"
  #include "runtime/threads.hpp"

@@ -196,8 +197,8 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
  // a very rare event.
  if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
- static volatile uint32_t counter=0;
- if (AtomicAccess::add(&counter, 1u) % 10 == 0) {
+ static Atomic<uint32_t> counter{0};
+ if (counter.add_then_fetch(1u) % 10 == 0) {
  may_enter = false;
  }
  }
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -23,7 +23,7 @@
  */

  #include "gc/shared/concurrentGCThread.hpp"
- #include "runtime/atomicAccess.hpp"
+ #include "runtime/atomic.hpp"
  #include "runtime/init.hpp"
  #include "runtime/jniHandles.hpp"
  #include "runtime/mutexLocker.hpp"

@@ -48,7 +48,7 @@ void ConcurrentGCThread::run() {

  // Signal thread has terminated
  MonitorLocker ml(Terminator_lock);
- AtomicAccess::release_store(&_has_terminated, true);
+ _has_terminated.release_store(true);
  ml.notify_all();
  }

@@ -57,21 +57,21 @@ void ConcurrentGCThread::stop() {
  assert(!has_terminated(), "Invalid state");

  // Signal thread to terminate
- AtomicAccess::release_store_fence(&_should_terminate, true);
+ _should_terminate.release_store_fence(true);

  stop_service();

  // Wait for thread to terminate
  MonitorLocker ml(Terminator_lock);
- while (!_has_terminated) {
+ while (!_has_terminated.load_relaxed()) {
  ml.wait();
  }
  }

  bool ConcurrentGCThread::should_terminate() const {
- return AtomicAccess::load_acquire(&_should_terminate);
+ return _should_terminate.load_acquire();
  }

  bool ConcurrentGCThread::has_terminated() const {
- return AtomicAccess::load_acquire(&_has_terminated);
+ return _has_terminated.load_acquire();
  }
|
||||
/*
|
||||
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,14 +25,15 @@
|
||||
#ifndef SHARE_GC_SHARED_CONCURRENTGCTHREAD_HPP
|
||||
#define SHARE_GC_SHARED_CONCURRENTGCTHREAD_HPP
|
||||
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/javaThread.hpp"
|
||||
#include "runtime/nonJavaThread.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
class ConcurrentGCThread: public NamedThread {
|
||||
private:
|
||||
volatile bool _should_terminate;
|
||||
volatile bool _has_terminated;
|
||||
Atomic<bool> _should_terminate;
|
||||
Atomic<bool> _has_terminated;
|
||||
|
||||
protected:
|
||||
void create_and_start(ThreadPriority prio = NearMaxPriority);
|
||||
|
||||
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +28,7 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/safepoint.hpp"
@ -60,16 +60,13 @@ public:
};

Monitor* GCLocker::_lock;
volatile bool GCLocker::_is_gc_request_pending;
Atomic<bool> GCLocker::_is_gc_request_pending{false};

DEBUG_ONLY(uint64_t GCLocker::_verify_in_cr_count;)
DEBUG_ONLY(Atomic<uint64_t> GCLocker::_verify_in_cr_count{0};)

void GCLocker::initialize() {
assert(JNICritical_lock != nullptr, "inv");
_lock = JNICritical_lock;
_is_gc_request_pending = false;

DEBUG_ONLY(_verify_in_cr_count = 0;)
}

bool GCLocker::is_active() {
@ -84,11 +81,11 @@ bool GCLocker::is_active() {
void GCLocker::block() {
// _lock is held from the beginning of block() to the end of unblock().
_lock->lock();
assert(AtomicAccess::load(&_is_gc_request_pending) == false, "precondition");
assert(_is_gc_request_pending.load_relaxed() == false, "precondition");

GCLockerTimingDebugLogger logger("Thread blocked to start GC.");

AtomicAccess::store(&_is_gc_request_pending, true);
_is_gc_request_pending.store_relaxed(true);

// The _is_gc_request_pending and _jni_active_critical (inside
// in_critical_atomic()) variables form a Dekker duality. On the GC side, the
@ -112,14 +109,14 @@ void GCLocker::block() {
#ifdef ASSERT
// Matching the storestore in GCLocker::exit.
OrderAccess::loadload();
assert(AtomicAccess::load(&_verify_in_cr_count) == 0, "inv");
assert(_verify_in_cr_count.load_relaxed() == 0, "inv");
#endif
}

void GCLocker::unblock() {
assert(AtomicAccess::load(&_is_gc_request_pending) == true, "precondition");
assert(_is_gc_request_pending.load_relaxed() == true, "precondition");

AtomicAccess::store(&_is_gc_request_pending, false);
_is_gc_request_pending.store_relaxed(false);
_lock->unlock();
}

@ -139,7 +136,7 @@ void GCLocker::enter_slow(JavaThread* current_thread) {
// Same as fast path.
OrderAccess::fence();

if (!AtomicAccess::load(&_is_gc_request_pending)) {
if (!_is_gc_request_pending.load_relaxed()) {
return;
}

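The GCLocker comment above describes a Dekker duality between `_is_gc_request_pending` and the per-thread critical-region state: each side stores its own flag, issues a full fence, then reads the other side's flag, so at least one of them observes the conflict. The sketch below only illustrates that store/fence/load pattern with invented names built on `std::atomic`; it is not the HotSpot implementation.

```cpp
// Minimal illustration of the Dekker-style store/fence/load pattern referenced in the
// GCLocker comment. Names are invented for this sketch; this is not HotSpot code.
#include <atomic>
#include <thread>

std::atomic<bool> gc_request_pending{false}; // GC side flag
std::atomic<bool> in_critical{false};        // mutator side flag

void gc_side() {
  gc_request_pending.store(true, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);   // full fence, like OrderAccess::fence()
  if (!in_critical.load(std::memory_order_relaxed)) {
    // No mutator is inside a critical region: safe to proceed with the blocking request.
  }
}

void mutator_side() {
  in_critical.store(true, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);   // matching full fence
  if (gc_request_pending.load(std::memory_order_relaxed)) {
    in_critical.store(false, std::memory_order_relaxed); // back off and take the slow path
  }
}

int main() {
  std::thread a(gc_side), b(mutator_side);
  a.join();
  b.join();
  return 0;
}
```

With both fences sequentially consistent, the two relaxed loads cannot both return false, which is what lets the fast path stay cheap on the mutator side.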
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "gc/shared/gcCause.hpp"
#include "memory/allStatic.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutex.hpp"

// GCLocker provides synchronization between the garbage collector (GC) and
@ -43,11 +44,11 @@

class GCLocker: public AllStatic {
static Monitor* _lock;
static volatile bool _is_gc_request_pending;
static Atomic<bool> _is_gc_request_pending;

#ifdef ASSERT
// Debug-only: to track the number of java threads in critical-region.
static uint64_t _verify_in_cr_count;
static Atomic<uint64_t> _verify_in_cr_count;
#endif
static void enter_slow(JavaThread* current_thread);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,13 +38,13 @@ void GCLocker::enter(JavaThread* current_thread) {
// Matching the fence in GCLocker::block.
OrderAccess::fence();

if (AtomicAccess::load(&_is_gc_request_pending)) {
if (_is_gc_request_pending.load_relaxed()) {
current_thread->exit_critical();
// slow-path
enter_slow(current_thread);
}

DEBUG_ONLY(AtomicAccess::add(&_verify_in_cr_count, (uint64_t)1);)
DEBUG_ONLY(_verify_in_cr_count.add_then_fetch(1u);)
} else {
current_thread->enter_critical();
}
@ -55,7 +55,7 @@ void GCLocker::exit(JavaThread* current_thread) {

#ifdef ASSERT
if (current_thread->in_last_critical()) {
AtomicAccess::add(&_verify_in_cr_count, (uint64_t)-1);
_verify_in_cr_count.sub_then_fetch(1u);
// Matching the loadload in GCLocker::block.
OrderAccess::storestore();
}

@ -700,7 +700,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
// then someone else has made such a claim and the deferred update has not
// yet been processed and will include our change, so we don't need to do
// anything further.
if (_deferred_updates_next.compare_exchange(nullptr, this) == nullptr) {
if (_deferred_updates_next.compare_set(nullptr, this)) {
// Successfully claimed. Push, with self-loop for end-of-list.
Block* head = owner->_deferred_updates.load_relaxed();
while (true) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,7 +25,7 @@
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/pretouchTask.hpp"
#include "logging/log.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
@ -52,11 +52,11 @@ size_t PretouchTask::chunk_size() {

void PretouchTask::work(uint worker_id) {
while (true) {
char* cur_start = AtomicAccess::load(&_cur_addr);
char* cur_start = _cur_addr.load_relaxed();
char* cur_end = cur_start + MIN2(_chunk_size, pointer_delta(_end_addr, cur_start, 1));
if (cur_start >= cur_end) {
break;
} else if (cur_start == AtomicAccess::cmpxchg(&_cur_addr, cur_start, cur_end)) {
} else if (_cur_addr.compare_set(cur_start, cur_end)) {
os::pretouch_memory(cur_start, cur_end, _page_size);
} // Else attempt to claim chunk failed, so try again.
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,9 +26,11 @@
#define SHARE_GC_SHARED_PRETOUCH_HPP

#include "gc/shared/workerThread.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"

class PretouchTask : public WorkerTask {
char* volatile _cur_addr;
Atomic<char*> _cur_addr;
char* const _end_addr;
size_t _page_size;
size_t _chunk_size;

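The PretouchTask hunks keep the same chunk-claiming loop, only rewritten against the `Atomic<char*>` field: every worker reads the shared cursor, tries to advance it by one chunk with a boolean CAS, and only the winner touches that range. A stand-alone sketch of that loop, with `std::atomic` standing in for the patched field and all names invented here:

```cpp
// Sketch of the chunk-claiming loop used by PretouchTask::work(), written against
// std::atomic as a stand-in for the Atomic<char*> field introduced by the patch.
#include <algorithm>
#include <atomic>
#include <cstddef>

struct PretouchSketch {
  std::atomic<char*> cur;  // next unclaimed address
  char* end;               // one past the last address
  size_t chunk_size;

  void work() {
    while (true) {
      char* cur_start = cur.load(std::memory_order_relaxed);
      size_t remaining = static_cast<size_t>(end - cur_start);
      char* cur_end = cur_start + std::min(chunk_size, remaining);
      if (cur_start >= cur_end) {
        break;  // everything has been claimed
      }
      if (cur.compare_exchange_strong(cur_start, cur_end, std::memory_order_relaxed)) {
        // Claimed [cur_start, cur_end): touch one byte per page here.
      }
      // On failure another worker advanced the cursor; retry with the fresh value.
    }
  }
};
```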
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@

uint SuspendibleThreadSet::_nthreads = 0;
uint SuspendibleThreadSet::_nthreads_stopped = 0;
volatile bool SuspendibleThreadSet::_suspend_all = false;
Atomic<bool> SuspendibleThreadSet::_suspend_all{false};
double SuspendibleThreadSet::_suspend_all_start = 0.0;

static Semaphore* _synchronize_wakeup = nullptr;
@ -96,7 +96,7 @@ void SuspendibleThreadSet::synchronize() {
{
MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag);
assert(!should_yield(), "Only one at a time");
AtomicAccess::store(&_suspend_all, true);
_suspend_all.store_relaxed(true);
if (is_synchronized()) {
return;
}
@ -127,6 +127,6 @@ void SuspendibleThreadSet::desynchronize() {
MonitorLocker ml(STS_lock, Mutex::_no_safepoint_check_flag);
assert(should_yield(), "STS not synchronizing");
assert(is_synchronized(), "STS not synchronized");
AtomicAccess::store(&_suspend_all, false);
_suspend_all.store_relaxed(false);
ml.notify_all();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,7 +26,7 @@
#define SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP

#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"

// A SuspendibleThreadSet is a set of threads that can be suspended.
// A thread can join and later leave the set, and periodically yield.
@ -43,7 +43,7 @@ class SuspendibleThreadSet : public AllStatic {
private:
static uint _nthreads;
static uint _nthreads_stopped;
static volatile bool _suspend_all;
static Atomic<bool> _suspend_all;
static double _suspend_all_start;

static bool is_synchronized();
@ -59,7 +59,7 @@ private:

public:
// Returns true if a suspension is in progress.
static bool should_yield() { return AtomicAccess::load(&_suspend_all); }
static bool should_yield() { return _suspend_all.load_relaxed(); }

// Suspends the current thread if a suspension is in progress.
static void yield() {

@ -183,8 +183,8 @@ protected:
_age.store_relaxed(new_age);
}

Age cmpxchg_age(Age old_age, Age new_age) {
return _age.compare_exchange(old_age, new_age);
bool par_set_age(Age old_age, Age new_age) {
return _age.compare_set(old_age, new_age);
}

idx_t age_top_relaxed() const {
@ -345,7 +345,7 @@ protected:

using TaskQueueSuper<N, MT>::age_relaxed;
using TaskQueueSuper<N, MT>::set_age_relaxed;
using TaskQueueSuper<N, MT>::cmpxchg_age;
using TaskQueueSuper<N, MT>::par_set_age;
using TaskQueueSuper<N, MT>::age_top_relaxed;

using TaskQueueSuper<N, MT>::increment_index;

@ -170,8 +170,7 @@ bool GenericTaskQueue<E, MT, N>::pop_local_slow(uint localBot, Age oldAge) {
if (localBot == oldAge.top()) {
// No competing pop_global has yet incremented "top"; we'll try to
// install new_age, thus claiming the element.
Age tempAge = cmpxchg_age(oldAge, newAge);
if (tempAge == oldAge) {
if (par_set_age(oldAge, newAge)) {
// We win.
assert_not_underflow(localBot, age_top_relaxed());
TASKQUEUE_STATS_ONLY(stats.record_pop_slow());
@ -283,12 +282,12 @@ typename GenericTaskQueue<E, MT, N>::PopResult GenericTaskQueue<E, MT, N>::pop_g
idx_t new_top = increment_index(oldAge.top());
idx_t new_tag = oldAge.tag() + ((new_top == 0) ? 1 : 0);
Age newAge(new_top, new_tag);
Age resAge = cmpxchg_age(oldAge, newAge);
bool result = par_set_age(oldAge, newAge);

// Note that using "bottom" here might fail, since a pop_local might
// have decremented it.
assert_not_underflow(localBot, newAge.top());
return resAge == oldAge ? PopResult::Success : PopResult::Contended;
return result ? PopResult::Success : PopResult::Contended;
}

inline int randomParkAndMiller(int *seed0) {

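The task-queue hunks replace the value-returning `cmpxchg_age` with the boolean `par_set_age`, so call sites no longer compare the witnessed value against the expected one. The sketch below only contrasts the two CAS call shapes with `std::atomic`; the function names here are invented for illustration.

```cpp
// The two CAS shapes involved in the cmpxchg_age -> par_set_age change, sketched with
// std::atomic. compare_exchange_strong already reports success as a bool, which is the
// shape the patched task-queue code now uses.
#include <atomic>
#include <cstdint>

std::atomic<uint32_t> age{0};

// Old shape: return the witnessed value and let the caller compare it.
uint32_t cas_returning_value(uint32_t expected, uint32_t desired) {
  uint32_t witnessed = expected;
  age.compare_exchange_strong(witnessed, desired);
  return witnessed;  // caller tests witnessed == expected
}

// New shape: report success directly, as par_set_age() does.
bool cas_returning_bool(uint32_t expected, uint32_t desired) {
  return age.compare_exchange_strong(expected, desired);
}

int main() {
  bool won = cas_returning_bool(0, 1);        // mirrors: if (par_set_age(oldAge, newAge)) { ... }
  uint32_t seen = cas_returning_value(1, 2);  // mirrors: tempAge = cmpxchg_age(...); if (tempAge == oldAge)
  (void)won; (void)seen;
  return 0;
}
```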
@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,7 +42,7 @@ WorkerTaskDispatcher::WorkerTaskDispatcher() :
void WorkerTaskDispatcher::coordinator_distribute_task(WorkerTask* task, uint num_workers) {
// No workers are allowed to read the state variables until they have been signaled.
_task = task;
_not_finished = num_workers;
_not_finished.store_relaxed(num_workers);

// Dispatch 'num_workers' number of tasks.
_start_semaphore.signal(num_workers);
@ -51,9 +51,12 @@ void WorkerTaskDispatcher::coordinator_distribute_task(WorkerTask* task, uint nu
_end_semaphore.wait();

// No workers are allowed to read the state variables after the coordinator has been signaled.
assert(_not_finished == 0, "%d not finished workers?", _not_finished);
#ifdef ASSERT
uint not_finished = _not_finished.load_relaxed();
assert(not_finished == 0, "%u not finished workers?", not_finished);
#endif // ASSERT
_task = nullptr;
_started = 0;
_started.store_relaxed(0);
}

void WorkerTaskDispatcher::worker_run_task() {
@ -61,7 +64,7 @@ void WorkerTaskDispatcher::worker_run_task() {
_start_semaphore.wait();

// Get and set worker id.
const uint worker_id = AtomicAccess::fetch_then_add(&_started, 1u);
const uint worker_id = _started.fetch_then_add(1u);
WorkerThread::set_worker_id(worker_id);

// Run task.
@ -70,7 +73,7 @@ void WorkerTaskDispatcher::worker_run_task() {

// Mark that the worker is done with the task.
// The worker is not allowed to read the state variables after this line.
const uint not_finished = AtomicAccess::sub(&_not_finished, 1u);
const uint not_finished = _not_finished.sub_then_fetch(1u);

// The last worker signals to the coordinator that all work is completed.
if (not_finished == 0) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "gc/shared/gcId.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/nonJavaThread.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/debug.hpp"
@ -58,8 +59,8 @@ class WorkerTaskDispatcher {
// The task currently being dispatched to the WorkerThreads.
WorkerTask* _task;

volatile uint _started;
volatile uint _not_finished;
Atomic<uint> _started;
Atomic<uint> _not_finished;

// Semaphore used to start the WorkerThreads.
Semaphore _start_semaphore;

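The WorkerTaskDispatcher hunks keep the same handshake: the coordinator publishes the task, stores the worker count, and releases the start semaphore once per worker; each worker takes a unique id with an atomic increment, and the last one to decrement the remaining-count wakes the coordinator via the end semaphore. A rough, hedged sketch of that shape using C++20 facilities as stand-ins for HotSpot's `Semaphore` and `Atomic<uint>` (worker count and names invented here):

```cpp
// Rough shape of the dispatcher handshake; std::counting_semaphore and std::atomic are
// stand-ins for HotSpot's Semaphore and Atomic<uint>. Requires C++20.
#include <atomic>
#include <cstdio>
#include <semaphore>
#include <thread>
#include <vector>

constexpr int kWorkers = 4;
std::counting_semaphore<kWorkers> start_sem{0};
std::counting_semaphore<1>        end_sem{0};
std::atomic<unsigned> started{0};
std::atomic<unsigned> not_finished{0};

void worker_run_task() {
  start_sem.acquire();                        // wait to be dispatched
  unsigned worker_id = started.fetch_add(1);  // like _started.fetch_then_add(1u)
  std::printf("worker %u running\n", worker_id);
  if (not_finished.fetch_sub(1) - 1 == 0) {   // like _not_finished.sub_then_fetch(1u)
    end_sem.release();                        // last worker wakes the coordinator
  }
}

void coordinator_distribute_task() {
  not_finished.store(kWorkers);               // publish state before signaling
  start_sem.release(kWorkers);                // dispatch kWorkers tasks
  end_sem.acquire();                          // wait until the last worker finishes
  started.store(0);
}

int main() {
  std::vector<std::thread> pool;
  for (int i = 0; i < kWorkers; i++) pool.emplace_back(worker_run_task);
  coordinator_distribute_task();
  for (auto& t : pool) t.join();
  return 0;
}
```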
@ -183,8 +183,8 @@ void ShenandoahAdaptiveHeuristics::record_success_concurrent() {
}
}

void ShenandoahAdaptiveHeuristics::record_success_degenerated() {
ShenandoahHeuristics::record_success_degenerated();
void ShenandoahAdaptiveHeuristics::record_degenerated() {
ShenandoahHeuristics::record_degenerated();
// Adjust both trigger's parameters in the case of a degenerated GC because
// either of them should have triggered earlier to avoid this case.
adjust_margin_of_error(DEGENERATE_PENALTY_SD);

@ -114,7 +114,7 @@ public:

virtual void record_cycle_start() override;
virtual void record_success_concurrent() override;
virtual void record_success_degenerated() override;
virtual void record_degenerated() override;
virtual void record_success_full() override;

virtual bool should_start_gc() override;

@ -243,7 +243,7 @@ void ShenandoahHeuristics::record_success_concurrent() {
adjust_penalty(Concurrent_Adjust);
}

void ShenandoahHeuristics::record_success_degenerated() {
void ShenandoahHeuristics::record_degenerated() {
adjust_penalty(Degenerated_Penalty);
}

@ -218,7 +218,7 @@ public:

virtual void record_success_concurrent();

virtual void record_success_degenerated();
virtual void record_degenerated();

virtual void record_success_full();

@ -766,10 +766,10 @@ void ShenandoahOldHeuristics::record_success_concurrent() {
this->ShenandoahHeuristics::record_success_concurrent();
}

void ShenandoahOldHeuristics::record_success_degenerated() {
void ShenandoahOldHeuristics::record_degenerated() {
// Forget any triggers that occurred while OLD GC was ongoing. If we really need to start another, it will retrigger.
clear_triggers();
this->ShenandoahHeuristics::record_success_degenerated();
this->ShenandoahHeuristics::record_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {

@ -201,7 +201,7 @@ public:

void record_success_concurrent() override;

void record_success_degenerated() override;
void record_degenerated() override;

void record_success_full() override;

@ -313,8 +313,12 @@ void ShenandoahDegenGC::op_degenerated() {
policy->record_degenerated(_generation->is_young(), _abbreviated, progress);
if (progress) {
heap->notify_gc_progress();
_generation->heuristics()->record_degenerated();
} else if (!heap->mode()->is_generational() || policy->generational_should_upgrade_degenerated_gc()) {
// Upgrade to full GC, register full-GC impact on heuristics.
op_degenerated_futile();
} else {
_generation->heuristics()->record_degenerated();
}
}

@ -338,7 +338,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() {
_empty_region_counts[partition_id] = 0;
_used[partition_id] = 0;
_humongous_waste[partition_id] = 0;
_available[partition_id] = FreeSetUnderConstruction;
_available[partition_id] = 0;
}
}

@ -2495,6 +2495,10 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r
void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions,
size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
shenandoah_assert_heaplocked();
assert(rebuild_lock() != nullptr, "sanity");
rebuild_lock()->lock(false);
// This resets all state information, removing all regions from all sets.
clear();
log_debug(gc, free)("Rebuilding FreeSet");

// This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
@ -2524,6 +2528,9 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_
_total_young_regions = _heap->num_regions() - old_region_count;
_total_global_regions = _heap->num_regions();
establish_old_collector_alloc_bias();

// Release the rebuild lock now. What remains in this function is read-only
rebuild_lock()->unlock();
_partitions.assert_bounds(true);
log_status();
}
@ -3058,7 +3065,7 @@ void ShenandoahFreeSet::log_status() {
size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
// capacity() is capacity of mutator
// used() is used of mutator
size_t free = capacity() - used();
size_t free = capacity_holding_lock() - used_holding_lock();
// Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
// retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
// my internally tracked values of used() and free().

@ -28,9 +28,13 @@

#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
#include "logging/logStream.hpp"

typedef ShenandoahLock ShenandoahRebuildLock;
typedef ShenandoahLocker ShenandoahRebuildLocker;

// Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
enum class ShenandoahFreeSetPartitionId : uint8_t {
Mutator, // Region is in the Mutator free set: available memory is available to mutators.
@ -139,8 +143,6 @@ public:
ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
~ShenandoahRegionPartitions() {}

static const size_t FreeSetUnderConstruction = SIZE_MAX;

inline idx_t max() const { return _max; }

// At initialization, reset OldCollector tallies
@ -352,6 +354,16 @@ public:
return _available[int(which_partition)];
}

// Return available_in assuming caller does not hold the heap lock but does hold the rebuild_lock.
// The returned value may be "slightly stale" because we do not assure that every fetch of this value
// sees the most recent update of this value. Requiring the caller to hold the rebuild_lock assures
// that we don't see "bogus" values that are "worse than stale". During rebuild of the freeset, the
// value of _available is not reliable.
inline size_t available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId which_partition) const {
assert (which_partition < NumPartitions, "selected free set must be valid");
return _available[int(which_partition)];
}

// Returns bytes of humongous waste
inline size_t humongous_waste(ShenandoahFreeSetPartitionId which_partition) const {
assert (which_partition < NumPartitions, "selected free set must be valid");
@ -359,23 +371,6 @@ public:
return _humongous_waste[int(which_partition)];
}

// Return available_in assuming caller does not hold the heap lock. In production builds, available is
// returned without acquiring the lock. In debug builds, the global heap lock is acquired in order to
// enforce a consistency assert.
inline size_t available_in_not_locked(ShenandoahFreeSetPartitionId which_partition) const {
assert (which_partition < NumPartitions, "selected free set must be valid");
shenandoah_assert_not_heaplocked();
#ifdef ASSERT
ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock());
assert((_available[int(which_partition)] == FreeSetUnderConstruction) ||
(_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)]),
"Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s",
_available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)],
partition_membership_name(idx_t(which_partition)));
#endif
return _available[int(which_partition)];
}

inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value);

inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
@ -440,6 +435,15 @@ private:
ShenandoahHeap* const _heap;
ShenandoahRegionPartitions _partitions;

// This locks the rebuild process (in combination with the global heap lock). Whenever we rebuild the free set,
// we first acquire the global heap lock and then we acquire this _rebuild_lock in a nested context. Threads that
// need to check available, acquire only the _rebuild_lock to make sure that they are not obtaining the value of
// available for a partially reconstructed free-set.
//
// Note that there is rank ordering of nested locks to prevent deadlock. All threads that need to acquire both
// locks will acquire them in the same order: first the global heap lock and then the rebuild lock.
ShenandoahRebuildLock _rebuild_lock;

size_t _total_humongous_waste;

HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);
@ -635,10 +639,12 @@ private:
void log_status();

public:
static const size_t FreeSetUnderConstruction = ShenandoahRegionPartitions::FreeSetUnderConstruction;

ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);

ShenandoahRebuildLock* rebuild_lock() {
return &_rebuild_lock;
}

inline size_t max_regions() const { return _partitions.max(); }
ShenandoahFreeSetPartitionId membership(size_t index) const { return _partitions.membership(index); }
inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
@ -776,9 +782,29 @@ public:
// adjusts available with respect to lock holders. However, sequential calls to these three functions may produce
// inconsistent data: available may not equal capacity - used because the intermediate states of any "atomic"
// locked action can be seen by these unlocked functions.
inline size_t capacity() const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); }
inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); }
inline size_t capacity_holding_lock() const {
shenandoah_assert_heaplocked();
return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t capacity_not_holding_lock() {
shenandoah_assert_not_heaplocked();
ShenandoahRebuildLocker locker(rebuild_lock());
return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t used_holding_lock() const {
shenandoah_assert_heaplocked();
return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t used_not_holding_lock() {
shenandoah_assert_not_heaplocked();
ShenandoahRebuildLocker locker(rebuild_lock());
return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);
}
inline size_t available() {
shenandoah_assert_not_heaplocked();
ShenandoahRebuildLocker locker(rebuild_lock());
return _partitions.available_in_locked_for_rebuild(ShenandoahFreeSetPartitionId::Mutator);
}

inline size_t total_humongous_waste() const { return _total_humongous_waste; }
inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); }

|
||||
ShenandoahPostCompactClosure post_compact;
|
||||
heap->heap_region_iterate(&post_compact);
|
||||
heap->collection_set()->clear();
|
||||
size_t young_cset_regions, old_cset_regions;
|
||||
size_t first_old, last_old, num_old;
|
||||
heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
|
||||
|
||||
// We also do not expand old generation size following Full GC because we have scrambled age populations and
|
||||
// no longer have objects separated by age into distinct regions.
|
||||
if (heap->mode()->is_generational()) {
|
||||
ShenandoahGenerationalFullGC::compute_balances();
|
||||
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
|
||||
ShenandoahFreeSet* free_set = heap->free_set();
|
||||
{
|
||||
free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
|
||||
// We also do not expand old generation size following Full GC because we have scrambled age populations and
|
||||
// no longer have objects separated by age into distinct regions.
|
||||
if (heap->mode()->is_generational()) {
|
||||
ShenandoahGenerationalFullGC::compute_balances();
|
||||
}
|
||||
free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
|
||||
}
|
||||
|
||||
heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
|
||||
|
||||
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
|
||||
_generation->set_mark_incomplete();
|
||||
|
||||
|
||||
@ -815,10 +815,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
|
||||
ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
|
||||
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
|
||||
ShenandoahHeapLocker locker(heap->lock());
|
||||
size_t young_cset_regions, old_cset_regions;
|
||||
|
||||
// We are preparing for evacuation. At this time, we ignore cset region tallies.
|
||||
size_t first_old, last_old, num_old;
|
||||
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
|
||||
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
|
||||
|
||||
if (heap->mode()->is_generational()) {
|
||||
|
||||
@ -61,7 +61,12 @@ ShenandoahGenerationalControlThread::ShenandoahGenerationalControlThread() :
|
||||
|
||||
void ShenandoahGenerationalControlThread::run_service() {
|
||||
|
||||
// This is the only instance of request. It is important that request.generation
|
||||
// does not change between a concurrent cycle failure and the start of a degenerated
|
||||
// cycle. We initialize it with the young generation to handle the pathological case
|
||||
// where the very first cycle is degenerated (some tests exercise this path).
|
||||
ShenandoahGCRequest request;
|
||||
request.generation = _heap->young_generation();
|
||||
while (!should_terminate()) {
|
||||
|
||||
// Figure out if we have pending requests.
|
||||
@ -77,12 +82,10 @@ void ShenandoahGenerationalControlThread::run_service() {
|
||||
|
||||
// If the cycle was cancelled, continue the next iteration to deal with it. Otherwise,
|
||||
// if there was no other cycle requested, cleanup and wait for the next request.
|
||||
if (!_heap->cancelled_gc()) {
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (_requested_gc_cause == GCCause::_no_gc) {
|
||||
set_gc_mode(ml, none);
|
||||
ml.wait();
|
||||
}
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (_requested_gc_cause == GCCause::_no_gc) {
|
||||
set_gc_mode(ml, none);
|
||||
ml.wait();
|
||||
}
|
||||
}
|
||||
|
||||
@ -96,8 +99,7 @@ void ShenandoahGenerationalControlThread::stop_service() {
|
||||
log_debug(gc, thread)("Stopping control thread");
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
_heap->cancel_gc(GCCause::_shenandoah_stop_vm);
|
||||
_requested_gc_cause = GCCause::_shenandoah_stop_vm;
|
||||
notify_cancellation(ml, GCCause::_shenandoah_stop_vm);
|
||||
notify_control_thread(ml, GCCause::_shenandoah_stop_vm);
|
||||
// We can't wait here because it may interfere with the active cycle's ability
|
||||
// to reach a safepoint (this runs on a java thread).
|
||||
}
|
||||
@ -105,29 +107,39 @@ void ShenandoahGenerationalControlThread::stop_service() {
|
||||
void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest& request) {
|
||||
// Hold the lock while we read request cause and generation
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (_heap->cancelled_gc()) {
|
||||
// The previous request was cancelled. Either it was cancelled for an allocation
|
||||
// failure (degenerated cycle), or old marking was cancelled to run a young collection.
|
||||
// In either case, the correct generation for the next cycle can be determined by
|
||||
// the cancellation cause.
|
||||
request.cause = _heap->clear_cancellation(GCCause::_shenandoah_concurrent_gc);
|
||||
if (request.cause == GCCause::_shenandoah_concurrent_gc) {
|
||||
|
||||
log_debug(gc, thread)("cancelled cause: %s, requested cause: %s",
|
||||
GCCause::to_string(_heap->cancelled_cause()), GCCause::to_string(_requested_gc_cause));
|
||||
|
||||
request.cause = _requested_gc_cause;
|
||||
if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) {
|
||||
if (_degen_point == ShenandoahGC::_degenerated_unset) {
|
||||
request.generation = _heap->young_generation();
|
||||
_degen_point = ShenandoahGC::_degenerated_outside_cycle;
|
||||
} else {
|
||||
assert(request.generation != nullptr, "Must know which generation to use for degenerated cycle");
|
||||
}
|
||||
} else {
|
||||
request.cause = _requested_gc_cause;
|
||||
if (request.cause == GCCause::_shenandoah_concurrent_gc) {
|
||||
// This is a regulator request. It is also possible that the regulator "canceled" an old mark,
|
||||
// so we can clear that here. This clear operation will only clear the cancellation if it is
|
||||
// a regulator request.
|
||||
_heap->clear_cancellation(GCCause::_shenandoah_concurrent_gc);
|
||||
}
|
||||
request.generation = _requested_generation;
|
||||
|
||||
// Only clear these if we made a request from them. In the case of a cancelled gc,
|
||||
// we do not want to inadvertently lose this pending request.
|
||||
_requested_gc_cause = GCCause::_no_gc;
|
||||
_requested_generation = nullptr;
|
||||
}
|
||||
|
||||
log_debug(gc, thread)("request.cause: %s, request.generation: %s",
|
||||
GCCause::to_string(request.cause), request.generation == nullptr ? "None" : request.generation->name());
|
||||
|
||||
_requested_gc_cause = GCCause::_no_gc;
|
||||
_requested_generation = nullptr;
|
||||
|
||||
if (request.cause == GCCause::_no_gc || request.cause == GCCause::_shenandoah_stop_vm) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(request.generation != nullptr, "request.generation cannot be null, cause is: %s", GCCause::to_string(request.cause));
|
||||
GCMode mode;
|
||||
if (ShenandoahCollectorPolicy::is_allocation_failure(request.cause)) {
|
||||
mode = prepare_for_allocation_failure_gc(request);
|
||||
@ -140,11 +152,9 @@ void ShenandoahGenerationalControlThread::check_for_request(ShenandoahGCRequest&
|
||||
}
|
||||
|
||||
ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_allocation_failure_gc(ShenandoahGCRequest &request) {
|
||||
|
||||
if (_degen_point == ShenandoahGC::_degenerated_unset) {
|
||||
_degen_point = ShenandoahGC::_degenerated_outside_cycle;
|
||||
request.generation = _heap->young_generation();
|
||||
} else if (request.generation->is_old()) {
|
||||
// Important: not all paths update the request.generation. This is intentional.
|
||||
// A degenerated cycle must use the same generation carried over from the previous request.
|
||||
if (request.generation->is_old()) {
|
||||
// This means we degenerated during the young bootstrap for the old generation
|
||||
// cycle. The following degenerated cycle should therefore also be young.
|
||||
request.generation = _heap->young_generation();
|
||||
@ -588,6 +598,8 @@ bool ShenandoahGenerationalControlThread::check_cancellation_or_degen(Shenandoah
|
||||
if (ShenandoahCollectorPolicy::is_allocation_failure(_heap->cancelled_cause())) {
|
||||
assert(_degen_point == ShenandoahGC::_degenerated_unset,
|
||||
"Should not be set yet: %s", ShenandoahGC::degen_point_to_string(_degen_point));
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
_requested_gc_cause = _heap->cancelled_cause();
|
||||
_degen_point = point;
|
||||
log_debug(gc, thread)("Cancellation detected:, reason: %s, degen point: %s",
|
||||
GCCause::to_string(_heap->cancelled_cause()),
|
||||
@ -633,9 +645,7 @@ void ShenandoahGenerationalControlThread::service_stw_degenerated_cycle(const Sh
|
||||
|
||||
void ShenandoahGenerationalControlThread::request_gc(GCCause::Cause cause) {
|
||||
if (ShenandoahCollectorPolicy::is_allocation_failure(cause)) {
|
||||
// GC should already be cancelled. Here we are just notifying the control thread to
|
||||
// wake up and handle the cancellation request, so we don't need to set _requested_gc_cause.
|
||||
notify_cancellation(cause);
|
||||
notify_control_thread(cause);
|
||||
} else if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) {
|
||||
handle_requested_gc(cause);
|
||||
}
|
||||
@ -653,7 +663,8 @@ bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenera
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
if (gc_mode() == servicing_old) {
|
||||
if (!preempt_old_marking(generation)) {
|
||||
log_debug(gc, thread)("Cannot start young, old collection is not preemptible");
|
||||
// Global should be able to cause old collection to be abandoned
|
||||
log_debug(gc, thread)("Cannot start %s, old collection is not preemptible", generation->name());
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -661,7 +672,7 @@ bool ShenandoahGenerationalControlThread::request_concurrent_gc(ShenandoahGenera
|
||||
log_info(gc)("Preempting old generation mark to allow %s GC", generation->name());
|
||||
while (gc_mode() == servicing_old) {
|
||||
ShenandoahHeap::heap()->cancel_gc(GCCause::_shenandoah_concurrent_gc);
|
||||
notify_cancellation(ml, GCCause::_shenandoah_concurrent_gc);
|
||||
notify_control_thread(ml, GCCause::_shenandoah_concurrent_gc, generation);
|
||||
ml.wait();
|
||||
}
|
||||
return true;
|
||||
@ -695,21 +706,34 @@ void ShenandoahGenerationalControlThread::notify_control_thread(GCCause::Cause c
|
||||
|
||||
void ShenandoahGenerationalControlThread::notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation) {
|
||||
assert(_control_lock.is_locked(), "Request lock must be held here");
|
||||
log_debug(gc, thread)("Notify control (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(cause), generation->name());
|
||||
_requested_gc_cause = cause;
|
||||
_requested_generation = generation;
|
||||
ml.notify();
|
||||
if (ShenandoahCollectorPolicy::is_allocation_failure(_requested_gc_cause)) {
|
||||
// We have already observed a request to handle an allocation failure. We cannot allow
|
||||
// another request (System.gc or regulator) to subvert the degenerated cycle.
|
||||
log_debug(gc, thread)("Not overwriting gc cause %s with %s", GCCause::to_string(_requested_gc_cause), GCCause::to_string(cause));
|
||||
} else {
|
||||
log_debug(gc, thread)("Notify control (%s): %s, %s", gc_mode_name(gc_mode()), GCCause::to_string(cause), generation->name());
|
||||
_requested_gc_cause = cause;
|
||||
_requested_generation = generation;
|
||||
ml.notify();
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahGenerationalControlThread::notify_cancellation(GCCause::Cause cause) {
|
||||
void ShenandoahGenerationalControlThread::notify_control_thread(GCCause::Cause cause) {
|
||||
MonitorLocker ml(&_control_lock, Mutex::_no_safepoint_check_flag);
|
||||
notify_cancellation(ml, cause);
|
||||
notify_control_thread(ml, cause);
|
||||
}
|
||||
|
||||
void ShenandoahGenerationalControlThread::notify_cancellation(MonitorLocker& ml, GCCause::Cause cause) {
|
||||
assert(_heap->cancelled_gc(), "GC should already be cancelled");
|
||||
log_debug(gc,thread)("Notify control (%s): %s", gc_mode_name(gc_mode()), GCCause::to_string(cause));
|
||||
ml.notify();
|
||||
void ShenandoahGenerationalControlThread::notify_control_thread(MonitorLocker& ml, GCCause::Cause cause) {
|
||||
assert(_control_lock.is_locked(), "Request lock must be held here");
|
||||
if (ShenandoahCollectorPolicy::is_allocation_failure(_requested_gc_cause)) {
|
||||
// We have already observed a request to handle an allocation failure. We cannot allow
|
||||
// another request (System.gc or regulator) to subvert the degenerated cycle.
|
||||
log_debug(gc, thread)("Not overwriting gc cause %s with %s", GCCause::to_string(_requested_gc_cause), GCCause::to_string(cause));
|
||||
} else {
|
||||
log_debug(gc, thread)("Notify control (%s): %s", gc_mode_name(gc_mode()), GCCause::to_string(cause));
|
||||
_requested_gc_cause = cause;
|
||||
ml.notify();
|
||||
}
|
||||
}
|
||||
|
||||
bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGeneration* generation) {
|
||||
|
||||
@ -135,16 +135,13 @@ private:
|
||||
// Return printable name for the given gc mode.
|
||||
static const char* gc_mode_name(GCMode mode);
|
||||
|
||||
// Takes the request lock and updates the requested cause and generation, then notifies the control thread.
|
||||
// The overloaded variant should be used when the _control_lock is already held.
|
||||
// These notify the control thread after updating _requested_gc_cause and (optionally) _requested_generation.
|
||||
// Updating the requested generation is not necessary for allocation failures nor when stopping the thread.
|
||||
void notify_control_thread(GCCause::Cause cause);
|
||||
void notify_control_thread(MonitorLocker& ml, GCCause::Cause cause);
|
||||
void notify_control_thread(GCCause::Cause cause, ShenandoahGeneration* generation);
|
||||
void notify_control_thread(MonitorLocker& ml, GCCause::Cause cause, ShenandoahGeneration* generation);
|
||||
|
||||
// Notifies the control thread, but does not update the requested cause or generation.
|
||||
// The overloaded variant should be used when the _control_lock is already held.
|
||||
void notify_cancellation(GCCause::Cause cause);
|
||||
void notify_cancellation(MonitorLocker& ml, GCCause::Cause cause);
|
||||
|
||||
// Configure the heap to age objects and regions if the aging period has elapsed.
|
||||
void maybe_set_aging_cycle();
|
||||
|
||||
|
||||
@ -44,13 +44,13 @@ public:
|
||||
void post_initialize_heuristics() override;
|
||||
|
||||
static ShenandoahGenerationalHeap* heap() {
|
||||
assert(ShenandoahCardBarrier, "Should have card barrier to use genenrational heap");
|
||||
assert(ShenandoahCardBarrier, "Should have card barrier to use generational heap");
|
||||
CollectedHeap* heap = Universe::heap();
|
||||
return cast(heap);
|
||||
}
|
||||
|
||||
static ShenandoahGenerationalHeap* cast(CollectedHeap* heap) {
|
||||
assert(ShenandoahCardBarrier, "Should have card barrier to use genenrational heap");
|
||||
assert(ShenandoahCardBarrier, "Should have card barrier to use generational heap");
|
||||
return checked_cast<ShenandoahGenerationalHeap*>(heap);
|
||||
}
|
||||
|
||||
|
||||
@ -426,8 +426,6 @@ jint ShenandoahHeap::initialize() {
|
||||
_affiliations[i] = ShenandoahAffiliation::FREE;
|
||||
}
|
||||
_free_set = new ShenandoahFreeSet(this, _num_regions);
|
||||
|
||||
|
||||
post_initialize_heuristics();
|
||||
// We are initializing free set. We ignore cset region tallies.
|
||||
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old;
|
||||
@ -1658,7 +1656,7 @@ void ShenandoahHeap::verify(VerifyOption vo) {
|
||||
}
|
||||
}
|
||||
size_t ShenandoahHeap::tlab_capacity() const {
|
||||
return _free_set->capacity();
|
||||
return _free_set->capacity_not_holding_lock();
|
||||
}
|
||||
|
||||
class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
|
||||
@ -2138,7 +2136,7 @@ GCTracer* ShenandoahHeap::tracer() {
|
||||
}
|
||||
|
||||
size_t ShenandoahHeap::tlab_used() const {
|
||||
return _free_set->used();
|
||||
return _free_set->used_not_holding_lock();
|
||||
}
|
||||
|
||||
bool ShenandoahHeap::try_cancel_gc(GCCause::Cause cause) {
|
||||
@ -2528,8 +2526,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
|
||||
ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
|
||||
ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
|
||||
ShenandoahHeapLocker locker(lock());
|
||||
size_t young_cset_regions, old_cset_regions;
|
||||
size_t first_old_region, last_old_region, old_region_count;
|
||||
size_t young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count;
|
||||
_free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
|
||||
// If there are no old regions, first_old_region will be greater than last_old_region
|
||||
assert((first_old_region > last_old_region) ||
|
||||
@ -2548,13 +2545,14 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
|
||||
// The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
|
||||
// available for transfer to old. Note that transfer of humongous regions does not impact available.
|
||||
ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
|
||||
size_t allocation_runway = gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
|
||||
size_t allocation_runway =
|
||||
gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
|
||||
gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions);
|
||||
|
||||
// Total old_available may have been expanded to hold anticipated promotions. We trigger if the fragmented available
|
||||
// memory represents more than 16 regions worth of data. Note that fragmentation may increase when we promote regular
|
||||
// regions in place when many of these regular regions have an abundant amount of available memory within them. Fragmentation
|
||||
// will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
|
||||
// regions in place when many of these regular regions have an abundant amount of available memory within them.
|
||||
// Fragmentation will decrease as promote-by-copy consumes the available memory within these partially consumed regions.
|
||||
//
|
||||
// We consider old-gen to have excessive fragmentation if more than 12.5% of old-gen is free memory that resides
|
||||
// within partially consumed regions of memory.
|
||||
|
||||
@ -30,7 +30,7 @@
|
||||
|
||||
ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot(ShenandoahFreeSet* free_set)
|
||||
: _free_set(free_set)
|
||||
, _used_before(free_set->used())
|
||||
, _used_before(free_set->used_not_holding_lock())
|
||||
, _if_before(free_set->internal_fragmentation())
|
||||
, _ef_before(free_set->external_fragmentation()) {
|
||||
}
|
||||
@ -38,7 +38,6 @@ ShenandoahMetricsSnapshot::ShenandoahMetricsSnapshot(ShenandoahFreeSet* free_set
|
||||
bool ShenandoahMetricsSnapshot::is_good_progress() const {
|
||||
// Under the critical threshold?
|
||||
const size_t free_actual = _free_set->available();
|
||||
assert(free_actual != ShenandoahFreeSet::FreeSetUnderConstruction, "Avoid this race");
|
||||
|
||||
// ShenandoahCriticalFreeThreshold is expressed as a percentage. We multiply this percentage by 1/100th
|
||||
// of the soft max capacity to determine whether the available memory within the mutator partition of the
|
||||
@ -52,7 +51,7 @@ bool ShenandoahMetricsSnapshot::is_good_progress() const {
|
||||
}
|
||||
|
||||
// Freed up enough?
|
||||
const size_t used_after = _free_set->used();
|
||||
const size_t used_after = _free_set->used_not_holding_lock();
|
||||
const size_t progress_actual = (_used_before > used_after) ? _used_before - used_after : 0;
|
||||
const size_t progress_expected = ShenandoahHeapRegion::region_size_bytes();
|
||||
const bool prog_used = progress_actual >= progress_expected;
|
||||
|
||||
@ -412,9 +412,12 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
|
||||
ShenandoahGCPhase phase(concurrent ?
|
||||
ShenandoahPhaseTimings::final_rebuild_freeset :
|
||||
ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
|
||||
ShenandoahFreeSet* free_set = heap->free_set();
|
||||
ShenandoahHeapLocker locker(heap->lock());
|
||||
size_t young_trash_regions, old_trash_regions;
|
||||
size_t first_old, last_old, num_old;
|
||||
|
||||
// This is completion of old-gen marking. We rebuild in order to reclaim immediate garbage and to
|
||||
// prepare for subsequent mixed evacuations.
|
||||
size_t young_trash_regions, old_trash_regions, first_old, last_old, num_old;
|
||||
heap->free_set()->prepare_to_rebuild(young_trash_regions, old_trash_regions, first_old, last_old, num_old);
|
||||
// At the end of old-gen, we may find that we have reclaimed immediate garbage, allowing a longer allocation runway.
|
||||
// We may also find that we have accumulated canddiate regions for mixed evacuation. If so, we will want to expand
|
||||
|
||||
@ -149,6 +149,13 @@ bool ShenandoahRegulatorThread::start_global_cycle() const {
|
||||
|
||||
bool ShenandoahRegulatorThread::request_concurrent_gc(ShenandoahGeneration* generation) const {
|
||||
double now = os::elapsedTime();
|
||||
|
||||
// This call may find the control thread waiting on workers which have suspended
|
||||
// to allow a safepoint to run. If this regulator thread does not yield, the safepoint
|
||||
// will not run. The worker threads won't progress, the control thread won't progress,
|
||||
// and the regulator thread may never yield. Therefore, we leave the suspendible
|
||||
// thread set before making this call.
|
||||
SuspendibleThreadSetLeaver leaver;
|
||||
bool accepted = _control_thread->request_concurrent_gc(generation);
|
||||
if (LogTarget(Debug, gc, thread)::is_enabled() && accepted) {
|
||||
double wait_time = os::elapsedTime() - now;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,6 +32,7 @@
|
||||
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
|
||||
#include "jfr/recorder/repository/jfrRepository.hpp"
|
||||
#include "jfr/recorder/service/jfrOptionSet.hpp"
|
||||
#include "jfr/recorder/service/jfrRecorderService.hpp"
|
||||
#include "jfr/support/jfrClassDefineEvent.hpp"
|
||||
#include "jfr/support/jfrKlassExtension.hpp"
|
||||
#include "jfr/support/jfrResolution.hpp"
|
||||
@ -43,6 +44,7 @@
|
||||
#include "runtime/java.hpp"
|
||||
#include "runtime/javaThread.hpp"
|
||||
|
||||
|
||||
bool Jfr::is_enabled() {
|
||||
return JfrRecorder::is_enabled();
|
||||
}
|
||||
@ -153,9 +155,9 @@ void Jfr::on_resolution(const Method* caller, const Method* target, TRAPS) {
|
||||
}
|
||||
#endif
|
||||
|
||||
void Jfr::on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown, bool halt) {
|
||||
void Jfr::on_vm_shutdown(bool exception_handler /* false */, bool halt /* false */, bool oom /* false */) {
|
||||
if (!halt && JfrRecorder::is_recording()) {
|
||||
JfrEmergencyDump::on_vm_shutdown(emit_old_object_samples, emit_event_shutdown);
|
||||
JfrEmergencyDump::on_vm_shutdown(exception_handler, oom);
|
||||
}
|
||||
}
|
||||
|
||||
@ -173,6 +175,12 @@ bool Jfr::on_start_flight_recording_option(const JavaVMOption** option, char* de
|
||||
return JfrOptionSet::parse_start_flight_recording_option(option, delimiter);
|
||||
}
|
||||
|
||||
void Jfr::on_report_java_out_of_memory() {
|
||||
if (CrashOnOutOfMemoryError && JfrRecorder::is_recording()) {
|
||||
JfrRecorderService::emit_leakprofiler_events_on_oom();
|
||||
}
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS
|
||||
void Jfr::on_restoration(const Klass* k, JavaThread* jt) {
|
||||
assert(k != nullptr, "invariant");
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -71,7 +71,7 @@ class Jfr : AllStatic {
|
||||
static void on_resolution(const Method* caller, const Method* target, TRAPS);
|
||||
static void on_java_thread_start(JavaThread* starter, JavaThread* startee);
|
||||
static void on_set_current_thread(JavaThread* jt, oop thread);
|
||||
static void on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown, bool halt = false);
|
||||
static void on_vm_shutdown(bool exception_handler = false, bool halt = false, bool oom = false);
|
||||
static void on_vm_error_report(outputStream* st);
|
||||
static bool on_flight_recorder_option(const JavaVMOption** option, char* delimiter);
|
||||
static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter);
|
||||
@ -79,6 +79,7 @@ class Jfr : AllStatic {
|
||||
static void initialize_main_thread(JavaThread* jt);
|
||||
static bool has_sample_request(JavaThread* jt);
|
||||
static void check_and_process_sample_request(JavaThread* jt);
|
||||
static void on_report_java_out_of_memory();
|
||||
CDS_ONLY(static void on_restoration(const Klass* k, JavaThread* jt);)
|
||||
};
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -364,8 +364,7 @@ JVM_ENTRY_NO_ENV(void, jfr_set_force_instrumentation(JNIEnv* env, jclass jvm, jb
|
||||
JVM_END
|
||||
|
||||
NO_TRANSITION(void, jfr_emit_old_object_samples(JNIEnv* env, jclass jvm, jlong cutoff_ticks, jboolean emit_all, jboolean skip_bfs))
|
||||
JfrRecorderService service;
|
||||
service.emit_leakprofiler_events(cutoff_ticks, emit_all == JNI_TRUE, skip_bfs == JNI_TRUE);
|
||||
JfrRecorderService::emit_leakprofiler_events(cutoff_ticks, emit_all == JNI_TRUE, skip_bfs == JNI_TRUE);
|
||||
NO_TRANSITION_END
|
||||
|
||||
JVM_ENTRY_NO_ENV(void, jfr_exclude_thread(JNIEnv* env, jclass jvm, jobject t))
|
||||
|
||||
@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -232,41 +232,50 @@ void JfrSamplerThread::task_stacktrace(JfrSampleRequestType type, JavaThread** l
JavaThread* start = nullptr;
elapsedTimer sample_time;
sample_time.start();
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;
{
/*
* Take the Threads_lock for three purposes:
*
* 1) Avoid sampling right through a safepoint,
* which could result in touching oops in case of virtual threads.
* 2) Prevent JFR from issuing an epoch rotation while the sampler thread
* is actively processing a thread in state native, as both threads are outside the safepoint protocol.
* 3) Some operating systems (BSD / Mac) require a process lock when sending a signal with pthread_kill.
* Holding the Threads_lock prevents a JavaThread from calling os::create_thread(), which also takes the process lock.
* In a sense, we provide a coarse signal mask, so we can always send the resume signal.
*/
MutexLocker tlock(Threads_lock);
ThreadsListHandle tlh;
// Resolve a sample session relative start position index into the thread list array.
// In cases where the last sampled thread is null or not-null but stale, find_index() returns -1.
_cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
JavaThread* current = _cur_index != -1 ? *last_thread : nullptr;

while (num_samples < sample_limit) {
current = next_thread(tlh.list(), start, current);
if (current == nullptr) {
break;
}
if (is_excluded(current)) {
continue;
}
if (start == nullptr) {
start = current; // remember the thread where we started to attempt sampling
}
bool success;
if (JAVA_SAMPLE == type) {
success = sample_java_thread(current);
} else {
assert(type == NATIVE_SAMPLE, "invariant");
success = sample_native_thread(current);
}
if (success) {
num_samples++;
}
if (SafepointSynchronize::is_at_safepoint()) {
// For _thread_in_native, we cannot get the Threads_lock.
// For _thread_in_Java, well, there are none.
break;
while (num_samples < sample_limit) {
current = next_thread(tlh.list(), start, current);
if (current == nullptr) {
break;
}
if (is_excluded(current)) {
continue;
}
if (start == nullptr) {
start = current; // remember the thread where we started to attempt sampling
}
bool success;
if (JAVA_SAMPLE == type) {
success = sample_java_thread(current);
} else {
assert(type == NATIVE_SAMPLE, "invariant");
success = sample_native_thread(current);
}
if (success) {
num_samples++;
}
}

*last_thread = current; // remember the thread we last attempted to sample
}

*last_thread = current; // remember the thread we last attempted to sample
sample_time.stop();
log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
sample_time.seconds(), type == JAVA_SAMPLE ? num_samples : 0, type == NATIVE_SAMPLE ? num_samples : 0);
@ -297,6 +306,7 @@ class OSThreadSampler : public SuspendedThreadTask {
// Sampling a thread in state _thread_in_Java
// involves a platform-specific thread suspend and CPU context retrieval.
bool JfrSamplerThread::sample_java_thread(JavaThread* jt) {
assert_lock_strong(Threads_lock);
if (jt->thread_state() != _thread_in_Java) {
return false;
}
@ -328,6 +338,7 @@ static JfrSamplerThread* _sampler_thread = nullptr;
// without thread suspension and CPU context retrieval,
// if we carefully order the loads of the thread state.
bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {
assert_lock_strong(Threads_lock);
if (jt->thread_state() != _thread_in_native) {
return false;
}
@ -343,24 +354,14 @@ bool JfrSamplerThread::sample_native_thread(JavaThread* jt) {

SafepointMechanism::arm_local_poll_release(jt);

// Take the Threads_lock for two purposes:
// 1) Avoid sampling through a safepoint which could result
// in touching oops in case of virtual threads.
// 2) Prevent JFR from issuing an epoch rotation while the sampler thread
// is actively processing a thread in native, as both threads are now
// outside the safepoint protocol.

// OrderAccess::fence() as part of acquiring the lock prevents loads from floating up.
JfrMutexTryLock threads_lock(Threads_lock);

if (!threads_lock.acquired() || !jt->has_last_Java_frame()) {
// Remove the native sample request and release the potentially waiting thread.
JfrSampleMonitor jsm(tl);
return false;
// Separate the arming of the poll (above) from the reading of JavaThread state (below).
if (UseSystemMemoryBarrier) {
SystemMemoryBarrier::emit();
} else {
OrderAccess::fence();
}

if (jt->thread_state() != _thread_in_native) {
assert_lock_strong(Threads_lock);
if (jt->thread_state() != _thread_in_native || !jt->has_last_Java_frame()) {
JfrSampleMonitor jsm(tl);
if (jsm.is_waiting()) {
// The thread has already returned from native,
@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,6 +38,8 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"

@ -460,15 +462,6 @@ static void release_locks(Thread* thread) {
assert(thread != nullptr, "invariant");
assert(!thread->is_Java_thread() || JavaThread::cast(thread)->thread_state() == _thread_in_vm, "invariant");

#ifdef ASSERT
Mutex* owned_lock = thread->owned_locks();
while (owned_lock != nullptr) {
Mutex* next = owned_lock->next();
owned_lock->unlock();
owned_lock = next;
}
#endif // ASSERT

if (Threads_lock->owned_by_self()) {
Threads_lock->unlock();
}
@ -550,17 +543,14 @@ class JavaThreadInVMAndNative : public StackObj {
}
};

static void post_events(bool emit_old_object_samples, bool emit_event_shutdown, Thread* thread) {
if (emit_old_object_samples) {
LeakProfiler::emit_events(max_jlong, false, false);
}
if (emit_event_shutdown) {
static void post_events(bool exception_handler, bool oom, Thread * thread) {
if (exception_handler) {
EventShutdown e;
e.set_reason("VM Error");
e.set_reason(oom ? "CrashOnOutOfMemoryError" : "VM Error");
e.commit();
}
EventDumpReason event;
event.set_reason(emit_old_object_samples ? "Out of Memory" : "Crash");
event.set_reason(exception_handler && oom ? "CrashOnOutOfMemoryError" : exception_handler ? "Crash" : "Out of Memory");
event.set_recordingId(-1);
event.commit();
}
@ -594,20 +584,40 @@ static bool guard_reentrancy() {
return false;
}

void JfrEmergencyDump::on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown) {
void JfrEmergencyDump::on_vm_shutdown(bool exception_handler, bool oom) {
if (!guard_reentrancy()) {
return;
}

Thread* const thread = Thread::current_or_null_safe();
assert(thread != nullptr, "invariant");
if (thread->is_Watcher_thread()) {
log_info(jfr, system)("The Watcher thread crashed so no jfr emergency dump will be generated.");
return;
}

// Ensure a JavaThread is _thread_in_vm when we make this call
JavaThreadInVMAndNative jtivm(thread);
post_events(exception_handler, oom, thread);

if (thread->is_Watcher_thread()) {
// We cannot attempt an emergency dump using the Watcher thread
// because we rely on the WatcherThread task "is_error_reported()",
// to exit the VM after a hardcoded timeout, should the relatively
// risky operation of an emergency dump fail (deadlock, livelock).
log_warning(jfr, system)
("The Watcher thread crashed so no jfr emergency dump will be generated.");
return;
}

if (thread->is_VM_thread()) {
const VM_Operation* const operation = VMThread::vm_operation();
if (operation != nullptr && operation->type() == VM_Operation::VMOp_JFROldObject) {
// We will not be able to issue a rotation because the rotation lock
// is held by the JFR Recorder Thread that issued the VM_Operation.
log_warning(jfr, system)
("The VM Thread crashed as part of emitting leak profiler events so no jfr emergency dump will be generated.");
return;
}
}

release_locks(thread);
post_events(emit_old_object_samples, emit_event_shutdown, thread);

// if JavaThread, transition to _thread_in_native to issue a final flushpoint
NoHandleMark nhm;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,7 @@ class JfrEmergencyDump : AllStatic {
static const char* chunk_path(const char* repository_path);
static void on_vm_error(const char* repository_path);
static void on_vm_error_report(outputStream* st, const char* repository_path);
static void on_vm_shutdown(bool emit_old_object_samples, bool emit_event_shutdown);
static void on_vm_shutdown(bool exception_handler, bool oom);
};

#endif // SHARE_JFR_RECORDER_REPOSITORY_JFREMERGENCYDUMP_HPP
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,8 @@
(MSGBIT(MSG_START)) | \
(MSGBIT(MSG_CLONE_IN_MEMORY)) | \
(MSGBIT(MSG_VM_ERROR)) | \
(MSGBIT(MSG_FLUSHPOINT)) \
(MSGBIT(MSG_FLUSHPOINT)) | \
(MSGBIT(MSG_EMIT_LEAKP_REFCHAINS)) \
)

static JfrPostBox* _instance = nullptr;
@ -165,7 +166,7 @@ void JfrPostBox::notify_waiters() {
assert(JfrMsg_lock->owned_by_self(), "incrementing _msg_handled_serial is protected by JfrMsg_lock.");
// Update made visible on release of JfrMsg_lock via fence instruction in Monitor::IUnlock.
++_msg_handled_serial;
JfrMsg_lock->notify();
JfrMsg_lock->notify_all();
}

// safeguard to ensure no threads are left waiting
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,6 +43,7 @@ enum JFR_Msg {
MSG_SHUTDOWN,
MSG_VM_ERROR,
MSG_FLUSHPOINT,
MSG_EMIT_LEAKP_REFCHAINS,
MSG_NO_OF_MSGS
};

@ -51,23 +52,25 @@ enum JFR_Msg {
*
* Synchronous messages (posting thread waits for message completion):
*
* MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
* MSG_START(1) ; MSGBIT(MSG_START) == (1 << 0x1) == 0x2
* MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
* MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
* MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
* MSG_CLONE_IN_MEMORY (0) ; MSGBIT(MSG_CLONE_IN_MEMORY) == (1 << 0) == 0x1
* MSG_START(1) ; MSGBIT(MSG_START) == (1 << 0x1) == 0x2
* MSG_STOP (2) ; MSGBIT(MSG_STOP) == (1 << 0x2) == 0x4
* MSG_ROTATE (3) ; MSGBIT(MSG_ROTATE) == (1 << 0x3) == 0x8
* MSG_VM_ERROR (8) ; MSGBIT(MSG_VM_ERROR) == (1 << 0x8) == 0x100
* MSG_FLUSHPOINT (9) ; MSGBIT(MSG_FLUSHPOINT) == (1 << 0x9) == 0x200
* MSG_EMIT_LEAKP_REFCHAINS (10); MSGBIT(MSG_EMIT_LEAKP_REFCHAINS) == (1 << 0xa) == 0x400
*
* Asynchronous messages (posting thread returns immediately upon deposit):
*
* MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
* MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
* MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
* MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
* MSG_FULLBUFFER (4) ; MSGBIT(MSG_FULLBUFFER) == (1 << 0x4) == 0x10
* MSG_CHECKPOINT (5) ; MSGBIT(CHECKPOINT) == (1 << 0x5) == 0x20
* MSG_WAKEUP (6) ; MSGBIT(WAKEUP) == (1 << 0x6) == 0x40
* MSG_SHUTDOWN (7) ; MSGBIT(MSG_SHUTDOWN) == (1 << 0x7) == 0x80
*/

class JfrPostBox : public JfrCHeapObj {
friend class JfrRecorder;
friend class JfrRecorderService;
public:
void post(JFR_Msg msg);
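As a minimal sketch (illustrative only, assuming MSGBIT(e) expands to (1 << (e)) as the bit values in the table above imply; the helper and mask names below are not from the patch), the synchronous set composes into a single bitmask and a posted message is tested against it like this:

// Illustrative sketch only, assuming MSGBIT(e) == (1 << (e)) as documented above.
#define MSGBIT(e) (1 << (e))

// Mirrors the synchronous set listed above: bits 0-3, 8, 9 and 10.
static const int synchronous_mask =
    MSGBIT(0) | MSGBIT(1) | MSGBIT(2) | MSGBIT(3) |
    MSGBIT(8) | MSGBIT(9) | MSGBIT(10);   // == 0x70f

static bool is_synchronous_msg(int msg) {
  return (synchronous_mask & MSGBIT(msg)) != 0;
}

The new MSG_EMIT_LEAKP_REFCHAINS bit (0x400) is added to the synchronous group, which is why the posting thread waits until the JFR Recorder Thread has handled the request.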
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,6 +55,7 @@
#include "runtime/safepoint.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/growableArray.hpp"

// incremented on each flushpoint
static u8 flushpoint_id = 0;
@ -391,6 +392,7 @@ class JfrSafepointWriteVMOperation : public VM_Operation {
JfrRecorderService::JfrRecorderService() :
_checkpoint_manager(JfrCheckpointManager::instance()),
_chunkwriter(JfrRepository::chunkwriter()),
_post_box(JfrPostBox::instance()),
_repository(JfrRepository::instance()),
_stack_trace_repository(JfrStackTraceRepository::instance()),
_storage(JfrStorage::instance()),
@ -670,17 +672,173 @@ void JfrRecorderService::evaluate_chunk_size_for_rotation() {
JfrChunkRotation::evaluate(_chunkwriter);
}

void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(JavaThread::current()));
// Take the rotation lock to exclude flush() during event emits. This is because event emit
// also creates a number checkpoint events. Those checkpoint events require a future typeset checkpoint
// event for completeness, i.e. to be generated before being flushed to a segment.
// The upcoming flush() or rotation() after event emit completes this typeset checkpoint
// and serializes all event emit checkpoint events to the same segment.
JfrRotationLock lock;
// Take the rotation lock before the transition.
JavaThread* current_thread = JavaThread::current();
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, current_thread));
ThreadInVMfromNative transition(current_thread);
LeakProfiler::emit_events(cutoff_ticks, emit_all, skip_bfs);
// LeakProfiler event serialization support.

struct JfrLeakProfilerEmitRequest {
int64_t cutoff_ticks;
bool emit_all;
bool skip_bfs;
bool oom;
};

typedef GrowableArrayCHeap<JfrLeakProfilerEmitRequest, mtTracing> JfrLeakProfilerEmitRequestQueue;
static JfrLeakProfilerEmitRequestQueue* _queue = nullptr;
constexpr const static int64_t _no_path_to_gc_roots = 0;
static bool _oom_emit_request_posted = false;
static bool _oom_emit_request_delivered = false;

static inline bool exclude_paths_to_gc_roots(int64_t cutoff_ticks) {
return cutoff_ticks <= _no_path_to_gc_roots;
}

static void enqueue(const JfrLeakProfilerEmitRequest& request) {
assert(JfrRotationLock::is_owner(), "invariant");
if (_queue == nullptr) {
_queue = new JfrLeakProfilerEmitRequestQueue(4);
}
assert(_queue != nullptr, "invariant");
assert(!_oom_emit_request_posted, "invariant");
if (request.oom) {
_oom_emit_request_posted = true;
}
_queue->append(request);
}

static JfrLeakProfilerEmitRequest dequeue() {
assert(JfrRotationLock::is_owner(), "invariant");
assert(_queue != nullptr, "invariant");
assert(_queue->is_nonempty(), "invariant");
const JfrLeakProfilerEmitRequest& request = _queue->first();
_queue->remove_at(0);
return request;
}

// This version of emit excludes path-to-gc-roots, i.e. it skips reference chains.
static void emit_leakprofiler_events(bool emit_all, bool skip_bfs, JavaThread* jt) {
assert(jt != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
// Take the rotation lock to exclude flush() during event emits. This is because the event emit operation
// also creates a number of checkpoint events. Those checkpoint events require a future typeset checkpoint
// event for completeness, i.e., to be generated before being flushed to a segment.
// The upcoming flush() or rotation() after event emit completes this typeset checkpoint
// and serializes all checkpoint events to the same segment.
JfrRotationLock lock;
// Take the rotation lock before the thread transition, to avoid blocking safepoints.
if (_oom_emit_request_posted) {
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
// is pending or has already been completed. We are about to crash at any time now.
assert(CrashOnOutOfMemoryError, "invariant");
return;
}
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
ThreadInVMfromNative transition(jt);
// Since we are not requesting path-to-gc-roots, i.e., reference chains, we need not issue a VM_Operation.
// Therefore, we can let the requesting thread process the request directly, since it already holds the requisite lock.
LeakProfiler::emit_events(_no_path_to_gc_roots, emit_all, skip_bfs);
}

void JfrRecorderService::transition_and_post_leakprofiler_emit_msg(JavaThread* jt) {
assert(jt != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
assert(!JfrRotationLock::is_owner(), "invariant");
// Transition to _thread_in_VM and post a synchronous message to the JFR Recorder Thread
// for it to process our enqueued request, which includes paths-to-gc-roots, i.e., reference chains.
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
ThreadInVMfromNative transition(jt);
_post_box.post(MSG_EMIT_LEAKP_REFCHAINS);
}

// This version of emit includes path-to-gc-roots, i.e., it includes in the request traversing of reference chains.
// Traversing reference chains is performed as part of a VM_Operation, and we initiate it from the JFR Recorder Thread.
// Because multiple threads can concurrently report_on_java_out_of_memory(), having them all post a synchronous JFR msg,
// they rendezvous at a safepoint in a convenient state, ThreadBlockInVM. This mechanism prevents any thread from racing past
// this point and begin executing VMError::report_and_die(), until at least one oom request has been delivered.
void JfrRecorderService::emit_leakprofiler_events_paths_to_gc_roots(int64_t cutoff_ticks,
bool emit_all,
bool skip_bfs,
bool oom,
JavaThread* jt) {
assert(jt != nullptr, "invariant");
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
assert(!exclude_paths_to_gc_roots(cutoff_ticks), "invariant");

{
JfrRotationLock lock;
// Take the rotation lock to read and post a request for the JFR Recorder Thread.
if (_oom_emit_request_posted) {
if (!oom) {
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
// is pending or has already been completed. We are about to crash at any time now.
assert(CrashOnOutOfMemoryError, "invariant");
return;
}
} else {
assert(!_oom_emit_request_posted, "invariant");
JfrLeakProfilerEmitRequest request = { cutoff_ticks, emit_all, skip_bfs, oom };
enqueue(request);
}
}
JfrRecorderService service;
service.transition_and_post_leakprofiler_emit_msg(jt);
}

// Leakprofiler serialization request, the jdk.jfr.internal.JVM.emitOldObjectSamples() Java entry point.
void JfrRecorderService::emit_leakprofiler_events(int64_t cutoff_ticks,
bool emit_all,
bool skip_bfs) {
JavaThread* const jt = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt);)
if (exclude_paths_to_gc_roots(cutoff_ticks)) {
::emit_leakprofiler_events(emit_all, skip_bfs, jt);
return;
}
emit_leakprofiler_events_paths_to_gc_roots(cutoff_ticks, emit_all, skip_bfs, /* oom */ false, jt);
}

// Leakprofiler serialization request, the report_on_java_out_of_memory VM entry point.
void JfrRecorderService::emit_leakprofiler_events_on_oom() {
assert(CrashOnOutOfMemoryError, "invariant");
if (EventOldObjectSample::is_enabled()) {
JavaThread* const jt = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)
ThreadToNativeFromVM transition(jt);
emit_leakprofiler_events_paths_to_gc_roots(max_jlong, false, false, /* oom */ true, jt);
}
}

// The worker routine for the JFR Recorder Thread when processing MSG_EMIT_LEAKP_REFCHAINS messages.
void JfrRecorderService::emit_leakprofiler_events() {
JavaThread* const jt = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
// Take the rotation lock before the transition.
JfrRotationLock lock;
if (_oom_emit_request_delivered) {
// A request to emit leakprofiler events in response to CrashOnOutOfMemoryError
// has already been completed. We are about to crash at any time now.
assert(_oom_emit_request_posted, "invariant");
assert(CrashOnOutOfMemoryError, "invariant");
return;
}

assert(_queue->is_nonempty(), "invariant");

{
MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, jt));
ThreadInVMfromNative transition(jt);
while (_queue->is_nonempty()) {
const JfrLeakProfilerEmitRequest& request = dequeue();
LeakProfiler::emit_events(request.cutoff_ticks, request.emit_all, request.skip_bfs);
if (_oom_emit_request_posted && request.oom) {
assert(CrashOnOutOfMemoryError, "invariant");
_oom_emit_request_delivered = true;
break;
}
}
}

// If processing involved an out-of-memory request, issue an immediate flush operation.
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt));
if (_chunkwriter.is_valid() && _oom_emit_request_delivered) {
invoke_flush();
}
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,19 +27,23 @@

#include "jfr/utilities/jfrAllocation.hpp"

class JavaThread;
class JfrCheckpointManager;
class JfrChunkWriter;
class JfrPostBox;
class JfrRepository;
class JfrStackTraceRepository;
class JfrStorage;
class JfrStringPool;

class JfrRecorderService : public StackObj {
friend class Jfr;
friend class JfrSafepointClearVMOperation;
friend class JfrSafepointWriteVMOperation;
private:
JfrCheckpointManager& _checkpoint_manager;
JfrChunkWriter& _chunkwriter;
JfrPostBox& _post_box;
JfrRepository& _repository;
JfrStackTraceRepository& _stack_trace_repository;
JfrStorage& _storage;
@ -64,6 +68,14 @@ class JfrRecorderService : public StackObj {
void invoke_safepoint_write();
void post_safepoint_write();

void transition_and_post_leakprofiler_emit_msg(JavaThread* jt);

static void emit_leakprofiler_events_on_oom();
static void emit_leakprofiler_events_paths_to_gc_roots(int64_t cutoff_ticks,
bool emit_all,
bool skip_bfs,
bool oom,
JavaThread* jt);
public:
JfrRecorderService();
void start();
@ -72,8 +84,12 @@ class JfrRecorderService : public StackObj {
void flushpoint();
void process_full_buffers();
void evaluate_chunk_size_for_rotation();
void emit_leakprofiler_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs);
void emit_leakprofiler_events();

static bool is_recording();
static void emit_leakprofiler_events(int64_t cutoff_ticks,
bool emit_all,
bool skip_bfs);
};

#endif // SHARE_JFR_RECORDER_SERVICE_JFRRECORDERSERVICE_HPP
@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,6 +44,7 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
#define ROTATE (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)))
#define FLUSHPOINT (msgs & (MSGBIT(MSG_FLUSHPOINT)))
#define PROCESS_FULL_BUFFERS (msgs & (MSGBIT(MSG_ROTATE)|MSGBIT(MSG_STOP)|MSGBIT(MSG_FULLBUFFER)))
#define LEAKPROFILER_REFCHAINS (msgs & MSGBIT(MSG_EMIT_LEAKP_REFCHAINS))

JfrPostBox& post_box = JfrRecorderThreadEntry::post_box();
log_debug(jfr, system)("Recorder thread STARTED");
@ -70,6 +71,9 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
if (PROCESS_FULL_BUFFERS) {
service.process_full_buffers();
}
if (LEAKPROFILER_REFCHAINS) {
service.emit_leakprofiler_events();
}
// Check amount of data written to chunk already
// if it warrants asking for a new chunk.
service.evaluate_chunk_size_for_rotation();
@ -98,5 +102,5 @@ void recorderthread_entry(JavaThread* thread, JavaThread* unused) {
#undef ROTATE
#undef FLUSHPOINT
#undef PROCESS_FULL_BUFFERS
#undef SCAVENGE
#undef LEAKPROFILER_REFCHAINS
}
@ -1031,6 +1031,15 @@ bool CallNode::is_call_to_arraycopystub() const {
return false;
}

bool CallNode::is_call_to_multianewarray_stub() const {
if (_name != nullptr &&
strstr(_name, "multianewarray") != nullptr &&
strstr(_name, "C2 runtime") != nullptr) {
return true;
}
return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {

@ -758,6 +758,7 @@ public:
virtual uint match_edge(uint idx) const;

bool is_call_to_arraycopystub() const;
bool is_call_to_multianewarray_stub() const;

virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
@ -1112,8 +1112,6 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( !ti->is_con() ) return nullptr;
jint con = ti->get_con();

Node *hook = new Node(1);

// First, special check for modulo 2^k-1
if( con >= 0 && con < max_jint && is_power_of_2(con+1) ) {
uint k = exact_log2(con+1); // Extract k
@ -1129,7 +1127,9 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node *x = in(1); // Value being mod'd
Node *divisor = in(2); // Also is mask

hook->init_req(0, x); // Add a use to x to prevent him from dying
// Add a use to x to prevent it from dying
Node* hook = new Node(1);
hook->init_req(0, x);
// Generate code to reduce X rapidly to nearly 2^k-1.
for( int i = 0; i < trip_count; i++ ) {
Node *xl = phase->transform( new AndINode(x,divisor) );
@ -1185,6 +1185,7 @@ Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
}

// Save in(1) so that it cannot be changed or deleted
Node* hook = new Node(1);
hook->init_req(0, in(1));

// Divide using the transform from DivI to MulL
@ -1407,8 +1408,6 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( !tl->is_con() ) return nullptr;
jlong con = tl->get_con();

Node *hook = new Node(1);

// Expand mod
if(con >= 0 && con < max_jlong && is_power_of_2(con + 1)) {
uint k = log2i_exact(con + 1); // Extract k
@ -1426,13 +1425,15 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node *x = in(1); // Value being mod'd
Node *divisor = in(2); // Also is mask

hook->init_req(0, x); // Add a use to x to prevent him from dying
// Add a use to x to prevent it from dying
Node* hook = new Node(1);
hook->init_req(0, x);
// Generate code to reduce X rapidly to nearly 2^k-1.
for( int i = 0; i < trip_count; i++ ) {
Node *xl = phase->transform( new AndLNode(x,divisor) );
Node *xh = phase->transform( new RShiftLNode(x,phase->intcon(k)) ); // Must be signed
x = phase->transform( new AddLNode(xh,xl) );
hook->set_req(0, x); // Add a use to x to prevent him from dying
hook->set_req(0, x); // Add a use to x to prevent it from dying
}

// Generate sign-fixup code. Was original value positive?
@ -1482,6 +1483,8 @@ Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}

// Save in(1) so that it cannot be changed or deleted
// Add a use to x to prevent him from dying
Node* hook = new Node(1);
hook->init_req(0, in(1));

// Divide using the transform from DivL to MulL
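For context on the reduction loop in the hunks above: x mod (2^k - 1) can be computed by repeatedly folding the high bits into the low bits, since 2^k is congruent to 1 modulo 2^k - 1. A minimal standalone sketch of that idea follows (illustrative only, unsigned case, not the C2 node form):

// Illustrative sketch of the mod-(2^k - 1) reduction the transform above generates.
// Folding x = (x >> k) + (x & mask) preserves x mod (2^k - 1) because 2^k == 1 (mod 2^k - 1).
static unsigned int mod_pow2_minus_1(unsigned int x, unsigned int k) {
  const unsigned int mask = (1u << k) - 1;   // the divisor 2^k - 1, also the AND mask
  while (x > mask) {
    x = (x >> k) + (x & mask);               // fold the high part into the low part
  }
  return (x == mask) ? 0 : x;                // x equal to the divisor means the remainder is 0
}

The C2 version additionally keeps the intermediate value alive via the hook node and handles negative inputs with the sign-fixup step mentioned above.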
@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1058,6 +1058,39 @@ void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_lo
// "new_load" might actually be a constant, parameter, etc.
if (new_load->is_Load()) {
Node* new_addp = new_load->in(MemNode::Address);

// If new_load is a Load but not from an AddP, it means that the load is folded into another
// load. And since this load is not from a field, we cannot create a unique type for it.
// For example:
//
// if (b) {
// Holder h1 = new Holder();
// Object o = ...;
// h.o = o.getClass();
// } else {
// Holder h2 = ...;
// }
// Holder h = Phi(h1, h2);
// Object r = h.o;
//
// Then, splitting r through the merge point results in:
//
// if (b) {
// Holder h1 = new Holder();
// Object o = ...;
// h.o = o.getClass();
// Object o1 = h.o;
// } else {
// Holder h2 = ...;
// Object o2 = h2.o;
// }
// Object r = Phi(o1, o2);
//
// In this case, o1 is folded to o.getClass() which is a Load but not from an AddP, but from
// an OopHandle that is loaded from the Klass of o.
if (!new_addp->is_AddP()) {
continue;
}
Node* base = get_addp_base(new_addp);

// The base might not be something that we can create an unique
@ -2066,8 +2099,7 @@ void ConnectionGraph::add_call_node(CallNode* call) {
// Use bytecode estimator to record whether the call's return value escapes.
ciMethod* meth = call->as_CallJava()->method();
if (meth == nullptr) {
const char* name = call->as_CallStaticJava()->_name;
assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
assert(call->as_CallStaticJava()->is_call_to_multianewarray_stub(), "TODO: add failed case check");
// Returns a newly allocated non-escaped object.
add_java_object(call, PointsToNode::NoEscape);
set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
@ -2775,8 +2807,7 @@ int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
const char* name = alloc->as_CallStaticJava()->_name;
assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
assert(alloc->as_CallStaticJava()->is_call_to_multianewarray_stub(), "sanity");
}
#endif
// Non-escaped allocation returned from Java or runtime call have unknown values in fields.
@ -3913,7 +3913,6 @@ const Type* SCMemProjNode::Value(PhaseGVN* phase) const
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
: Node(required),
_type(rt),
_adr_type(at),
_barrier_data(0)
{
init_req(MemNode::Control, c );
@ -3921,6 +3920,7 @@ LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const Ty
init_req(MemNode::Address, adr);
init_req(MemNode::ValueIn, val);
init_class_id(Class_LoadStore);
DEBUG_ONLY(_adr_type = at; adr_type();)
}

//------------------------------Value-----------------------------------------
@ -3944,6 +3944,11 @@ const Type* LoadStoreNode::Value(PhaseGVN* phase) const {
return bottom_type();
}

const TypePtr* LoadStoreNode::adr_type() const {
const TypePtr* cross_check = DEBUG_ONLY(_adr_type) NOT_DEBUG(nullptr);
return MemNode::calculate_adr_type(in(MemNode::Address)->bottom_type(), cross_check);
}

uint LoadStoreNode::ideal_reg() const {
return _type->ideal_reg();
}

@ -797,11 +797,6 @@ public:
virtual int Opcode() const;
virtual bool is_CFG() const { return false; }
virtual const Type *bottom_type() const {return Type::MEMORY;}
virtual const TypePtr *adr_type() const {
Node* ctrl = in(0);
if (ctrl == nullptr) return nullptr; // node is dead
return ctrl->in(MemNode::Memory)->adr_type();
}
virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
virtual const Type* Value(PhaseGVN* phase) const;
#ifndef PRODUCT
@ -814,9 +809,11 @@ public:
class LoadStoreNode : public Node {
private:
const Type* const _type; // What kind of value is loaded?
const TypePtr* _adr_type; // What kind of memory is being addressed?
uint8_t _barrier_data; // Bit field with barrier information
virtual uint size_of() const; // Size is bigger
#ifdef ASSERT
const TypePtr* _adr_type; // What kind of memory is being addressed?
#endif // ASSERT
public:
LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
virtual bool depends_only_on_test() const { return false; }
@ -824,7 +821,7 @@ public:

virtual const Type *bottom_type() const { return _type; }
virtual uint ideal_reg() const;
virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address
virtual const TypePtr* adr_type() const;
virtual const Type* Value(PhaseGVN* phase) const;

bool result_not_used() const;
@ -762,26 +762,36 @@ bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
//------------------------------dead_loop_check--------------------------------
// Check for a simple dead loop when a data node references itself directly
// or through an other data node excluding cons and phis.
void PhaseGVN::dead_loop_check( Node *n ) {
// Phi may reference itself in a loop
if (n != nullptr && !n->is_dead_loop_safe() && !n->is_CFG()) {
// Do 2 levels check and only data inputs.
bool no_dead_loop = true;
uint cnt = n->req();
for (uint i = 1; i < cnt && no_dead_loop; i++) {
Node *in = n->in(i);
if (in == n) {
no_dead_loop = false;
} else if (in != nullptr && !in->is_dead_loop_safe()) {
uint icnt = in->req();
for (uint j = 1; j < icnt && no_dead_loop; j++) {
if (in->in(j) == n || in->in(j) == in)
no_dead_loop = false;
}
void PhaseGVN::dead_loop_check(Node* n) {
// Phi may reference itself in a loop.
if (n == nullptr || n->is_dead_loop_safe() || n->is_CFG()) {
return;
}

// Do 2 levels check and only data inputs.
for (uint i = 1; i < n->req(); i++) {
Node* in = n->in(i);
if (in == n) {
n->dump_bfs(100, nullptr, "");
fatal("Dead loop detected, node references itself: %s (%d)",
n->Name(), n->_idx);
}

if (in == nullptr || in->is_dead_loop_safe()) {
continue;
}
for (uint j = 1; j < in->req(); j++) {
if (in->in(j) == n) {
n->dump_bfs(100, nullptr, "");
fatal("Dead loop detected, node input references current node: %s (%d) -> %s (%d)",
in->Name(), in->_idx, n->Name(), n->_idx);
}
if (in->in(j) == in) {
n->dump_bfs(100, nullptr, "");
fatal("Dead loop detected, node input references itself: %s (%d)",
in->Name(), in->_idx);
}
}
if (!no_dead_loop) { n->dump_bfs(100, nullptr, ""); }
assert(no_dead_loop, "dead loop detected");
}
}
@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2482,6 +2482,11 @@ static bool can_subword_truncate(Node* in, const Type* type) {
return false;
}

// Since casts specifically change the type of a node, stay on the safe side and do not truncate them.
if (in->is_ConstraintCast()) {
return false;
}

// Cannot be truncated:
switch (opc) {
case Op_AbsI:
@ -75,6 +75,7 @@
// v.release_store(x) -> void
// v.release_store_fence(x) -> void
// v.compare_exchange(x, y [, o]) -> T
// v.compare_set(x, y [, o]) -> bool
// v.exchange(x [, o]) -> T
//
// (2) All atomic types are default constructible.
@ -267,6 +268,11 @@ public:
return AtomicAccess::cmpxchg(value_ptr(), compare_value, new_value, order);
}

bool compare_set(T compare_value, T new_value,
atomic_memory_order order = memory_order_conservative) {
return compare_exchange(compare_value, new_value, order) == compare_value;
}

T exchange(T new_value,
atomic_memory_order order = memory_order_conservative) {
return AtomicAccess::xchg(this->value_ptr(), new_value, order);
@ -479,6 +485,13 @@ public:
order));
}

bool compare_set(T compare_value, T new_value,
atomic_memory_order order = memory_order_conservative) {
return _value.compare_set(decay(compare_value),
decay(new_value),
order);
}

T exchange(T new_value, atomic_memory_order order = memory_order_conservative) {
return recover(_value.exchange(decay(new_value), order));
}
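A small usage sketch for the compare_set() convenience added above (illustrative only; the wrapper type name and the zero initialization below are assumptions, while the documented shape is v.compare_set(expected, desired [, order]) -> bool, i.e. the compare_exchange succeeded iff it returned the expected value):

// Illustrative only: a one-time initialization guard expressed with compare_set().
// "AtomicValue" is a placeholder name for the wrapper declared in this header.
static AtomicValue<int> _initialized;   // assumed zero-initialized

void initialize_once() {
  if (_initialized.compare_set(0, 1)) {
    // first caller wins and performs the one-time setup
  }
  // later callers observe false and skip the setup
}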