8377715: Thawing frame can undo deoptimization

Co-authored-by: Richard Reingruber <rrich@openjdk.org>
Reviewed-by: rrich, fyang, dlong
This commit is contained in:
Patricio Chilano Mateo 2026-04-23 21:36:27 +00:00
parent 8db8d274a2
commit 3bbb0d9f00
7 changed files with 297 additions and 22 deletions

View File

@ -540,6 +540,8 @@ template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame&
intptr_t* frame_sp = caller.sp() - fsize;
if ((bottom && argsize > 0) || caller.is_interpreted_frame()) {
assert(!_should_patch_caller_pc, "");
_should_patch_caller_pc = caller.is_interpreted_frame();
frame_sp -= argsize + frame::metadata_words_at_top;
frame_sp = align_down(frame_sp, frame::alignment_in_bytes);
caller.set_sp(frame_sp + fsize);

View File

@ -193,6 +193,7 @@ static void verify_continuation(oop continuation) { Continuation::debug_verify_c
static void do_deopt_after_thaw(JavaThread* thread);
static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, outputStream* st);
static bool verify_deopt_state(const frame& f);
static void log_frames(JavaThread* thread);
static void log_frames_after_thaw(JavaThread* thread, ContinuationWrapper& cont, intptr_t* sp);
static void print_frame_layout(const frame& f, bool callee_complete, outputStream* st = tty);
@ -2054,10 +2055,12 @@ protected:
intptr_t* _fastpath;
bool _barriers;
bool _preempted_case;
bool _should_patch_caller_pc;
bool _process_args_at_top;
intptr_t* _top_unextended_sp_before_thaw;
int _align_size;
DEBUG_ONLY(intptr_t* _top_stack_address);
DEBUG_ONLY(intptr_t* _top_stack_address;)
DEBUG_ONLY(address _caller_raw_pc;)
// Only used for preemption on ObjectLocker
ObjectMonitor* _init_lock;
@ -2479,6 +2482,7 @@ NOINLINE intptr_t* Thaw<ConfigT>::thaw_slow(stackChunkOop chunk, Continuation::t
}
frame caller; // the thawed caller on the stack
_should_patch_caller_pc = false;
recurse_thaw(heap_frame, caller, num_frames, _preempted_case);
finish_thaw(caller); // caller is now the topmost thawed frame
_cont.write();
@ -2561,6 +2565,8 @@ void ThawBase::finalize_thaw(frame& entry, int argsize) {
assert(entry.sp() == _cont.entrySP(), "");
assert(Continuation::is_continuation_enterSpecial(entry), "");
assert(_cont.is_entry_frame(entry), "");
assert(entry.pc() == entry.raw_pc(), "");
DEBUG_ONLY(_caller_raw_pc = entry.pc();)
}
inline void ThawBase::before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame) {
@ -2590,10 +2596,12 @@ inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
if (bottom) {
ContinuationHelper::Frame::patch_pc(caller, _cont.is_empty() ? caller.pc()
: StubRoutines::cont_returnBarrier());
} else {
// caller might have been deoptimized during thaw but we've overwritten the return address when copying f from the heap.
// If the caller is not deoptimized, pc is unchanged.
} else if (_should_patch_caller_pc) {
// Caller was deoptimized during thaw but we've overwritten the return address when copying f from the heap.
// Also, on some platforms, if the caller is interpreted but the callee is not, we need to patch.
assert(caller.is_deoptimized_frame() PPC64_ONLY(|| caller.is_interpreted_frame()), "");
ContinuationHelper::Frame::patch_pc(caller, caller.raw_pc());
_should_patch_caller_pc = false;
}
patch_pd(f, caller);
@ -2604,6 +2612,7 @@ inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
assert(!bottom || !_cont.is_empty() || Continuation::is_continuation_entry_frame(f, nullptr), "");
assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
assert(!caller.is_compiled_frame() || verify_deopt_state(caller), "");
}
void ThawBase::clear_bitmap_bits(address start, address end) {
@ -2805,6 +2814,10 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c
}
DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
DEBUG_ONLY(address return_pc = ContinuationHelper::InterpretedFrame::return_pc(f);)
assert(return_pc == _caller_raw_pc || (is_bottom_frame && return_pc == StubRoutines::cont_returnBarrier()), "wrong return pc");
assert(f.pc() == f.raw_pc(), "");
DEBUG_ONLY(_caller_raw_pc = f.pc();)
caller = f;
}
@ -2855,6 +2868,7 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
assert(!f.is_deoptimized_frame(), "");
if (hf.is_deoptimized_frame()) {
maybe_set_fastpath(f.sp());
f.set_deoptimized();
} else if (_thread->is_interp_only_mode()
|| (stub_caller && f.cb()->as_nmethod()->is_marked_for_deoptimization())) {
// The caller of the safepoint stub when the continuation is preempted is not at a call instruction, and so
@ -2868,6 +2882,8 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
assert(f.is_deoptimized_frame(), "");
assert(ContinuationHelper::Frame::is_deopt_return(f.raw_pc(), f), "");
maybe_set_fastpath(f.sp());
assert(!_should_patch_caller_pc, "");
_should_patch_caller_pc = true;
}
if (!is_bottom_frame) {
@ -2881,6 +2897,9 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
}
DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
DEBUG_ONLY(address return_pc = ContinuationHelper::CompiledFrame::return_pc(f);)
assert(return_pc == _caller_raw_pc || (is_bottom_frame && return_pc == StubRoutines::cont_returnBarrier()), "wrong return pc");
DEBUG_ONLY(_caller_raw_pc = f.raw_pc();)
caller = f;
}
@ -2930,6 +2949,7 @@ void ThawBase::recurse_thaw_stub_frame(const frame& hf, frame& caller, int num_f
_cont.tail()->fix_thawed_frame(caller, &map);
DEBUG_ONLY(after_thaw_java_frame(f, false /*is_bottom_frame*/);)
assert(ContinuationHelper::StubFrame::return_pc(f) == _caller_raw_pc, "wrong return pc");
caller = f;
}
@ -2979,6 +2999,7 @@ void ThawBase::recurse_thaw_native_frame(const frame& hf, frame& caller, int num
_cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance_no_args());
DEBUG_ONLY(after_thaw_java_frame(f, false /* bottom */);)
assert(ContinuationHelper::NativeFrame::return_pc(f) == _caller_raw_pc, "wrong return pc");
caller = f;
}
@ -3023,8 +3044,7 @@ void ThawBase::finish_thaw(frame& f) {
}
void ThawBase::push_return_frame(const frame& f) { // see generate_cont_thaw
assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == f.cb()->as_nmethod()->is_deopt_pc(f.raw_pc()), "");
assert(!f.is_compiled_frame() || f.is_deoptimized_frame() == (f.pc() != f.raw_pc()), "");
assert(!f.is_compiled_frame() || verify_deopt_state(f), "");
LogTarget(Trace, continuations) lt;
if (lt.develop_is_enabled()) {
@ -3170,6 +3190,14 @@ static bool do_verify_after_thaw(JavaThread* thread, stackChunkOop chunk, output
return true;
}
// Debug-only consistency check: the three views of a compiled frame's deopt
// status must agree — the frame's cached flag, the raw pc, and the pc as
// seen through ContinuationHelper::Frame::real_pc.
static bool verify_deopt_state(const frame& f) {
  nmethod* nm = f.cb()->as_nmethod();
  const bool deopted = f.is_deoptimized_frame();
  assert(deopted == nm->is_deopt_pc(f.raw_pc()), "");
  assert(deopted == (f.pc() != f.raw_pc()), "");
  assert(deopted == nm->is_deopt_pc(ContinuationHelper::Frame::real_pc(f)), "");
  return true;
}
static void log_frames(JavaThread* thread) {
const static int show_entry_callers = 3;
LogTarget(Trace, continuations) lt;

View File

@ -1133,22 +1133,6 @@ void frame::oops_upcall_do(OopClosure* f, const RegisterMap* map) const {
_cb->as_upcall_stub()->oops_do(f, *this);
}
// Returns whether this frame has been marked deoptimized (i.e. its return
// address was patched to the deopt handler and _deopt_state recorded it).
// NOTE(review): this out-of-line version is the one removed by this change
// in favor of an inline definition.
bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  if (_deopt_state == is_deoptimized) {
    return true;
  }

  /* This method only checks if the frame is deoptimized
   * as in return address being patched.
   * It doesn't care if the OP that we return to is a
   * deopt instruction */
  /*if (_cb != nullptr && _cb->is_nmethod()) {
    return NativeDeoptInstruction::is_deopt_at(_pc);
  }*/
  return false;
}
void frame::oops_do_internal(OopClosure* f, NMethodClosure* cf,
DerivedOopClosure* df, DerivedPointerIterationMode derived_mode,
const RegisterMap* map, bool use_interpreter_oop_map_cache) const {

View File

@ -214,6 +214,9 @@ class frame {
// tells whether this frame can be deoptimized
bool can_be_deoptimized() const;
// used by virtual thread thaw code to fix deopt state
inline void set_deoptimized();
// the frame size in machine words
inline int frame_size() const;

View File

@ -84,6 +84,19 @@ inline address frame::get_deopt_original_pc() const {
return nullptr;
}
// Whether this frame has been marked deoptimized. Only answerable once the
// deopt state has been determined for this frame.
inline bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  if (_deopt_state == is_deoptimized) {
    return true;
  }
  return false;
}
// Used by the virtual thread thaw code to restore the deopt state of a
// thawed compiled frame (see 8377715). Precondition: _pc holds the original
// (pre-deopt) pc — the asserts below check _pc is not itself a deopt pc and
// matches the nmethod's recorded original pc — so only the cached
// _deopt_state flag needs to be set here; no pc patching is done.
inline void frame::set_deoptimized() {
  assert(is_compiled_frame(), "invalid operation");
  assert(_cb == CodeCache::find_blob(_pc), "invalid _pc");
  DEBUG_ONLY(nmethod* nm = _cb->as_nmethod();)
  assert(!nm->is_deopt_pc(_pc) && nm->get_original_pc(this) == _pc, "wrong _pc");
  _deopt_state = is_deoptimized;
}
template <typename RegisterMapT>
inline address frame::oopmapreg_to_location(VMReg reg, const RegisterMapT* reg_map) const {
if (reg->is_reg()) {

View File

@ -0,0 +1,85 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8377715
* @summary Test that thawing deoptimized frame preserves the deopt status
* @requires vm.continuations
* @library /test/lib /test/hotspot/jtreg
* @run main/othervm/native -agentlib:DeoptimizedFrame DeoptimizedFrame
*/
import jdk.test.lib.Asserts;
// Test choreography (see the jtreg tags above; the agent implements the
// native methods):
//  1. warmUp() runs foo() 30_000 times so it gets JIT-compiled — presumably
//     with the virtual call devirtualized to A.m(); confirm against the JIT's
//     inlining decisions if this test needs adjusting.
//  2. main holds 'lock' and starts a vthread running foo(); the vthread
//     blocks on the contended monitor enter, firing the agent's
//     MonitorContendedEnter handler; waitForVThread() returns once the agent
//     has seen it.
//  3. While the vthread is parked, 'receiver' is replaced by a B instance,
//     which is expected to deoptimize the vthread's compiled foo() frame.
//  4. main exits the synchronized block and notifyVThread() lets the vthread
//     continue; when its frames are thawed the deopt status must be
//     preserved, so B.m() runs and result == 3 (the bug made it 1).
public class DeoptimizedFrame {
    // Monitor the vthread contends on; the contention triggers the JVMTI event.
    private static final Object lock = new Object();
    // Receiver of the virtual call in foo(); swapped from A to B mid-test.
    private static A receiver = new A();
    // Written by the vthread, read by main after join().
    private static volatile int result;

    // Provided by the DeoptimizedFrame native agent.
    private static native int setupReferences(Thread t, Object o);
    private static native void waitForVThread();
    private static native void notifyVThread();

    public static void foo() {
        synchronized (lock) {
            result = receiver.m();
        }
    }

    public static void main(String[] args) throws Exception {
        warmUp();
        Asserts.assertTrue(receiver.m() == 1, "unexpected value=" + receiver.m());

        // Keep the lambda (not a method reference): it adds a frame to the
        // vthread stack that is frozen/thawed, which is part of the scenario.
        Thread vthread = Thread.ofVirtual().unstarted(() -> foo());

        // Let the agent know which thread/monitor to watch for.
        int res = setupReferences(vthread, lock);
        Asserts.assertTrue(res == 0, "error setting references");

        synchronized (lock) {
            vthread.start();
            waitForVThread();       // vthread is now blocked on 'lock'
            receiver = new B();     // should invalidate compiled foo()
            notifyVThread();
        }
        vthread.join();
        Asserts.assertTrue(result == 3, "unexpected result=" + result);
    }

    private static void warmUp() throws Exception {
        for (int i = 0; i < 30_000; i++) {
            foo();
        }
    }

    static class A {
        int m() {
            return 1;
        }
    }

    static class B extends A {
        int m() {
            return 3;
        }
    }
}

View File

@ -0,0 +1,160 @@
/*
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include <stdio.h>
#include <string.h>
#include "jvmti.h"
#include "jni.h"
extern "C" {
static jvmtiEnv *jvmti = nullptr;
static jthread test_vthread = nullptr;
static jobject test_monitor = nullptr;
static jrawMonitorID agent_monitor = nullptr;
static bool called_contended_enter = false;
class RawMonitorLocker {
private:
jvmtiEnv* _jvmti;
JNIEnv* _jni;
jrawMonitorID _monitor;
void check_jvmti_status(JNIEnv* jni, jvmtiError err, const char* msg) {
if (err != JVMTI_ERROR_NONE) {
jni->FatalError(msg);
}
}
public:
RawMonitorLocker(jvmtiEnv *jvmti, JNIEnv* jni, jrawMonitorID monitor): _jvmti(jvmti), _jni(jni), _monitor(monitor) {
check_jvmti_status(_jni, _jvmti->RawMonitorEnter(_monitor), "Fatal Error in RawMonitorEnter.");
}
~RawMonitorLocker() {
check_jvmti_status(_jni, _jvmti->RawMonitorExit(_monitor), "Fatal Error in RawMonitorExit.");
}
void wait() {
check_jvmti_status(_jni, _jvmti->RawMonitorWait(_monitor, 0), "Fatal Error in RawMonitorWait.");
}
void notify_all() {
check_jvmti_status(_jni, _jvmti->RawMonitorNotifyAll(_monitor), "Fatal Error in RawMonitorNotifyAll.");
}
};
// JVMTI MonitorContendedEnter callback: posted when a thread blocks trying
// to enter a Java monitor. We only act when the test vthread contends on the
// test monitor: record that the event happened, wake up the main thread
// (polling in Java_DeoptimizedFrame_waitForVThread), then block on the agent
// monitor until Java_DeoptimizedFrame_notifyVThread signals us to continue.
JNIEXPORT void JNICALL
MonitorContendedEnter(jvmtiEnv *jvmti, JNIEnv *jni, jthread vthread, jobject monitor) {
  if (!jni->IsSameObject(test_vthread, vthread)) {
    printf("Thread is not the required one\n");
    return;
  }
  if (!jni->IsSameObject(test_monitor, monitor)) {
    printf("Monitor is not the required one\n");
    return;
  }
  RawMonitorLocker rml(jvmti, jni, agent_monitor);
  called_contended_enter = true;
  rml.notify_all();  // release the main thread's poll loop
  rml.wait();        // park here until notifyVThread
}
// Agent bootstrap: obtains the JVMTI environment, requests the capability
// needed for monitor events, installs the MonitorContendedEnter callback,
// enables that event for all threads, and creates the raw monitor used to
// synchronize the agent with the test. Returns JNI_OK on success, JNI_ERR
// on any failure (with a diagnostic printed).
static
jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
  jint res;
  jvmtiError err;
  jvmtiCapabilities caps;
  jvmtiEventCallbacks callbacks;

  printf("Agent_OnLoad started\n");

  res = jvm->GetEnv((void **)&jvmti, JVMTI_VERSION);
  if (res != JNI_OK || jvmti == nullptr) {
    printf("Agent_OnLoad: Error in GetEnv: %d\n", res);
    return JNI_ERR;
  }

  // Monitor events require the can_generate_monitor_events capability.
  memset(&caps, 0, sizeof(caps));
  caps.can_generate_monitor_events = 1;
  err = jvmti->AddCapabilities(&caps);
  if (err != JVMTI_ERROR_NONE) {
    printf("Agent_OnLoad: Error in JVMTI AddCapabilities: %d\n", err);
    return JNI_ERR;
  }

  memset(&callbacks, 0, sizeof(callbacks));
  callbacks.MonitorContendedEnter = &MonitorContendedEnter;
  err = jvmti->SetEventCallbacks(&callbacks, sizeof(jvmtiEventCallbacks));
  if (err != JVMTI_ERROR_NONE) {
    printf("Agent_OnLoad: Error in JVMTI SetEventCallbacks: %d\n", err);
    return JNI_ERR;
  }

  // nullptr thread argument enables the event globally (all threads).
  err = jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_MONITOR_CONTENDED_ENTER, nullptr);
  if (err != JVMTI_ERROR_NONE) {
    printf("Agent_OnLoad: Error in JVMTI SetEventNotificationMode: %d\n", err);
    return JNI_ERR;
  }

  err = jvmti->CreateRawMonitor("agent sync monitor", &agent_monitor);
  if (err != JVMTI_ERROR_NONE) {
    printf("Agent_OnLoad: Error in JVMTI CreateRawMonitor: %d\n", err);
    return JNI_ERR;
  }

  return JNI_OK;
}
// Standard JVMTI entry point for -agentlib loading; delegates to the common
// initialization routine.
JNIEXPORT jint JNICALL
Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
  return Agent_Initialize(jvm, options, reserved);
}
// Stores global references to the test's vthread and contended monitor so
// the MonitorContendedEnter handler can recognize them later.
// Returns JNI_OK (0) on success, JNI_ERR on failure.
JNIEXPORT jint JNICALL
Java_DeoptimizedFrame_setupReferences(JNIEnv *jni, jclass cls, jthread vthread, jobject monitor) {
  test_vthread = (jthread)jni->NewGlobalRef(vthread);
  test_monitor = jni->NewGlobalRef(monitor);
  if (test_vthread == nullptr || test_monitor == nullptr) {
    printf("GlobalRef null\n");  // fix: message previously lacked a newline
    // Don't leave a half-initialized pair behind: release whichever global
    // reference was successfully created so nothing leaks and the handler
    // never sees a stale reference.
    if (test_vthread != nullptr) {
      jni->DeleteGlobalRef(test_vthread);
      test_vthread = nullptr;
    }
    if (test_monitor != nullptr) {
      jni->DeleteGlobalRef(test_monitor);
      test_monitor = nullptr;
    }
    return JNI_ERR;
  }
  return JNI_OK;
}
// Blocks the calling (main) thread until the agent has observed the
// contended-enter event for the test vthread. Waiting in a loop guards
// against spurious wakeups of the raw monitor.
JNIEXPORT void JNICALL
Java_DeoptimizedFrame_waitForVThread(JNIEnv *jni, jclass cls) {
  RawMonitorLocker locker(jvmti, jni, agent_monitor);
  while (!called_contended_enter) {
    locker.wait();
  }
}
// Wakes the agent's event handler, which is parked in MonitorContendedEnter
// waiting on the agent raw monitor.
JNIEXPORT void JNICALL
Java_DeoptimizedFrame_notifyVThread(JNIEnv *jni, jclass cls) {
  RawMonitorLocker locker(jvmti, jni, agent_monitor);
  locker.notify_all();
}
} // extern "C"