8351159: Remaining cleanups in cpu/x86 after 32-bit x86 removal
Reviewed-by: stefank, kvn
parent 80fcfaf41a
commit 795bf9f6d1
@@ -120,27 +120,6 @@ void AbstractInterpreter::layout_activation(Method* method,
                     method->method_holder()->java_mirror();
}

#ifndef _LP64
int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
    case T_BOOLEAN: i = 0; break;
    case T_CHAR   : i = 1; break;
    case T_BYTE   : i = 2; break;
    case T_SHORT  : i = 3; break;
    case T_INT    : // fall through
    case T_LONG   : // fall through
    case T_VOID   : i = 4; break;
    case T_FLOAT  : i = 5; break;  // have to treat float and double separately for SSE
    case T_DOUBLE : i = 6; break;
    case T_OBJECT : // fall through
    case T_ARRAY  : i = 7; break;
    default       : ShouldNotReachHere();
  }
  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
  return i;
}
#else
int AbstractInterpreter::BasicType_as_index(BasicType type) {
  int i = 0;
  switch (type) {
@@ -161,7 +140,6 @@ int AbstractInterpreter::BasicType_as_index(BasicType type) {
         "index out of bounds");
  return i;
}
#endif // _LP64

// How much stack a method activation needs in words.
int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
@@ -173,11 +151,7 @@ int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset) + entry_size;

#ifndef _LP64
  const int stub_code = 4;  // see generate_call_stub
#else
  const int stub_code = frame::entry_frame_after_call_words;
#endif

  const int method_stack = (method->max_locals() + method->max_stack()) *
                           Interpreter::stackElementWords;

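Aside: the quantity assembled in this hunk is the interpreter activation footprint in words. A standalone sketch of that arithmetic, with the HotSpot constants replaced by hypothetical plain parameters (the final sum is an assumption about the elided tail of the function):

    // Sketch: interpreter activation size in words, combining the pieces
    // visible in the hunk above (frame overhead, call-stub words, and the
    // locals/expression-stack area). All inputs are hypothetical stand-ins.
    int activation_size_words(int overhead_words,       // frame overhead incl. monitors
                              int stub_words,           // e.g. entry_frame_after_call_words
                              int max_locals,
                              int max_stack,
                              int stack_element_words) {
      const int method_stack = (max_locals + max_stack) * stack_element_words;
      return overhead_words + method_stack + stub_words;
    }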
@@ -2935,7 +2935,6 @@ void Assembler::mov(Register dst, Register src) {
}

void Assembler::movapd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
@@ -8071,7 +8070,6 @@ void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector
}

void Assembler::orpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);

@@ -45,21 +45,13 @@ define_pd_global(intx, ConditionalMoveLimit, 3);
define_pd_global(intx, FreqInlineSize, 325);
define_pd_global(intx, MinJumpTableSize, 10);
define_pd_global(intx, LoopPercentProfileLimit, 10);
#ifdef AMD64
define_pd_global(intx, InteriorEntryAlignment, 16);
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
#else
define_pd_global(intx, InteriorEntryAlignment, 4);
define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
// InitialCodeCacheSize derived from specjbb2000 run.
define_pd_global(size_t, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
#endif // AMD64

define_pd_global(intx, RegisterCostAreaRatio, 16000);

// Peephole and CISC spilling both break the graph, and so makes the

@@ -33,14 +33,6 @@ extern void reg_mask_init();

void Compile::pd_compiler2_init() {
  guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
  // QQQ presumably all 64bit cpu's support this. Seems like the ifdef could
  // simply be left out.
#ifndef AMD64
  if (!VM_Version::supports_cmov()) {
    ConditionalMoveLimit = 0;
  }
#endif // AMD64

  if (UseAVX < 3) {
    int delta = XMMRegister::max_slots_per_register * XMMRegister::number_of_registers;
    int bottom = ConcreteRegisterImpl::max_fpr;

@@ -61,8 +61,7 @@ address CompiledDirectCall::emit_to_interp_stub(MacroAssembler *masm, address ma
#undef __

int CompiledDirectCall::to_interp_stub_size() {
  return NOT_LP64(10)    // movl; jmp
         LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
  return 15;  // movq (1+1+8); jmp (1+4)
}

int CompiledDirectCall::to_trampoline_stub_size() {

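Aside: the collapsed return above relies on HotSpot's word-size selection macros; with 32-bit x86 gone, _LP64 is always defined for this port, so the pair degenerates as sketched below (macro shapes as defined in globalDefinitions.hpp):

    // Under _LP64:
    //   NOT_LP64(code)   expands to nothing
    //   LP64_ONLY(code)  expands to code
    // so the removed
    //   return NOT_LP64(10)    // movl; jmp
    //          LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
    // always evaluated to the kept
    //   return 15;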
@@ -23,8 +23,6 @@
 *
 */

#ifdef _LP64

#include "memory/metaspace.hpp"
#include "oops/compressedKlass.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -54,5 +52,3 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size

  return result;
}

#endif // _LP64

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,19 +28,11 @@
#include OS_CPU_HEADER(copy)

static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
#ifdef AMD64
  julong* to = (julong*) tohw;
  julong v = ((julong) value << 32) | value;
  while (count-- > 0) {
    *to++ = v;
  }
#else
  juint* to = (juint*)tohw;
  count *= HeapWordSize / BytesPerInt;
  while (count-- > 0) {
    *to++ = value;
  }
#endif // AMD64
}

static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
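Aside: the surviving AMD64 branch of pd_fill_to_words duplicates the 32-bit pattern into both halves of each 64-bit word. A self-contained illustration of the trick (plain C++, not the HotSpot types):

    #include <cstdint>
    #include <cstddef>

    // Fill 'count' 64-bit words with a 32-bit pattern duplicated into
    // both halves -- the same trick as the AMD64 branch above.
    void fill_words(uint64_t* to, size_t count, uint32_t value) {
      const uint64_t v = (static_cast<uint64_t>(value) << 32) | value;
      while (count-- > 0) {
        *to++ = v;
      }
    }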
@@ -60,52 +52,10 @@ static void pd_zero_to_bytes(void* to, size_t count) {
}

static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#if defined AMD64 || defined _WINDOWS
  (void)memmove(to, from, count * HeapWordSize);
#else
  // Includes a zero-count check.
  intx temp = 0;
  __asm__ volatile(" testl %6,%6 ;"
                   " jz 7f ;"
                   " cmpl %4,%5 ;"
                   " leal -4(%4,%6,4),%3;"
                   " jbe 1f ;"
                   " cmpl %7,%5 ;"
                   " jbe 4f ;"
                   "1: cmpl $32,%6 ;"
                   " ja 3f ;"
                   " subl %4,%1 ;"
                   "2: movl (%4),%3 ;"
                   " movl %7,(%5,%4,1) ;"
                   " addl $4,%0 ;"
                   " subl $1,%2 ;"
                   " jnz 2b ;"
                   " jmp 7f ;"
                   "3: rep; smovl ;"
                   " jmp 7f ;"
                   "4: cmpl $32,%2 ;"
                   " movl %7,%0 ;"
                   " leal -4(%5,%6,4),%1;"
                   " ja 6f ;"
                   " subl %4,%1 ;"
                   "5: movl (%4),%3 ;"
                   " movl %7,(%5,%4,1) ;"
                   " subl $4,%0 ;"
                   " subl $1,%2 ;"
                   " jnz 5b ;"
                   " jmp 7f ;"
                   "6: std ;"
                   " rep; smovl ;"
                   " cld ;"
                   "7: nop "
                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
                   : "0" (from), "1" (to), "2" (count), "3" (temp)
                   : "memory", "flags");
#endif // AMD64
}

static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
  switch (count) {
  case 8:  to[7] = from[7];
  case 7:  to[6] = from[6];
@@ -120,39 +70,10 @@ static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count)
    (void)memcpy(to, from, count * HeapWordSize);
    break;
  }
#else
#if defined _WINDOWS
  (void)memcpy(to, from, count * HeapWordSize);
#else
  // Includes a zero-count check.
  intx temp = 0;
  __asm__ volatile(" testl %6,%6 ;"
                   " jz 3f ;"
                   " cmpl $32,%6 ;"
                   " ja 2f ;"
                   " subl %4,%1 ;"
                   "1: movl (%4),%3 ;"
                   " movl %7,(%5,%4,1);"
                   " addl $4,%0 ;"
                   " subl $1,%2 ;"
                   " jnz 1b ;"
                   " jmp 3f ;"
                   "2: rep; smovl ;"
                   "3: nop "
                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
                   : "0" (from), "1" (to), "2" (count), "3" (temp)
                   : "memory", "cc");
#endif // _WINDOWS
#endif // AMD64
}

static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
  shared_disjoint_words_atomic(from, to, count);
#else
  // pd_disjoint_words is word-atomic in this implementation.
  pd_disjoint_words(from, to, count);
#endif // AMD64
}

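Aside: with the 32-bit inline assembly gone, the conjoint/disjoint split above reduces to the classic memmove-versus-memcpy distinction. A minimal illustration using only the standard library:

    #include <cstring>
    #include <cstddef>

    // Conjoint: source and destination may overlap, so memmove is required.
    void conjoint_bytes(const void* from, void* to, size_t bytes) {
      std::memmove(to, from, bytes);
    }

    // Disjoint: the caller guarantees no overlap, so memcpy is permitted.
    void disjoint_bytes(const void* from, void* to, size_t bytes) {
      std::memcpy(to, from, bytes);
    }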
static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
@@ -164,82 +85,7 @@ static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t
}

static void pd_conjoint_bytes(const void* from, void* to, size_t count) {
#if defined AMD64 || defined _WINDOWS
  (void)memmove(to, from, count);
#else
  // Includes a zero-count check.
  intx temp = 0;
  __asm__ volatile(" testl %6,%6 ;"
                   " jz 13f ;"
                   " cmpl %4,%5 ;"
                   " leal -1(%4,%6),%3 ;"
                   " jbe 1f ;"
                   " cmpl %7,%5 ;"
                   " jbe 8f ;"
                   "1: cmpl $3,%6 ;"
                   " jbe 6f ;"
                   " movl %6,%3 ;"
                   " movl $4,%2 ;"
                   " subl %4,%2 ;"
                   " andl $3,%2 ;"
                   " jz 2f ;"
                   " subl %6,%3 ;"
                   " rep; smovb ;"
                   "2: movl %7,%2 ;"
                   " shrl $2,%2 ;"
                   " jz 5f ;"
                   " cmpl $32,%2 ;"
                   " ja 4f ;"
                   " subl %4,%1 ;"
                   "3: movl (%4),%%edx ;"
                   " movl %%edx,(%5,%4,1);"
                   " addl $4,%0 ;"
                   " subl $1,%2 ;"
                   " jnz 3b ;"
                   " addl %4,%1 ;"
                   " jmp 5f ;"
                   "4: rep; smovl ;"
                   "5: movl %7,%2 ;"
                   " andl $3,%2 ;"
                   " jz 13f ;"
                   "6: xorl %7,%3 ;"
                   "7: movb (%4,%7,1),%%dl ;"
                   " movb %%dl,(%5,%7,1) ;"
                   " addl $1,%3 ;"
                   " subl $1,%2 ;"
                   " jnz 7b ;"
                   " jmp 13f ;"
                   "8: std ;"
                   " cmpl $12,%2 ;"
                   " ja 9f ;"
                   " movl %7,%0 ;"
                   " leal -1(%6,%5),%1 ;"
                   " jmp 11f ;"
                   "9: xchgl %3,%2 ;"
                   " movl %6,%0 ;"
                   " addl $1,%2 ;"
                   " leal -1(%7,%5),%1 ;"
                   " andl $3,%2 ;"
                   " jz 10f ;"
                   " subl %6,%3 ;"
                   " rep; smovb ;"
                   "10: movl %7,%2 ;"
                   " subl $3,%0 ;"
                   " shrl $2,%2 ;"
                   " subl $3,%1 ;"
                   " rep; smovl ;"
                   " andl $3,%3 ;"
                   " jz 12f ;"
                   " movl %7,%2 ;"
                   " addl $3,%0 ;"
                   " addl $3,%1 ;"
                   "11: rep; smovb ;"
                   "12: cld ;"
                   "13: nop ;"
                   : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
                   : "0" (from), "1" (to), "2" (count), "3" (temp)
                   : "memory", "flags", "%edx");
#endif // AMD64
}

static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) {
@@ -253,49 +99,16 @@ static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t co
}

static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
#ifdef AMD64
  _Copy_conjoint_jints_atomic(from, to, count);
#else
  assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size");
  // pd_conjoint_words is word-atomic in this implementation.
  pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif // AMD64
}

static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
#ifdef AMD64
  _Copy_conjoint_jlongs_atomic(from, to, count);
#else
  // Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't.
  if (from > to) {
    while (count-- > 0) {
      __asm__ volatile("fildll (%0); fistpll (%1)"
                       :
                       : "r" (from), "r" (to)
                       : "memory" );
      ++from;
      ++to;
    }
  } else {
    while (count-- > 0) {
      __asm__ volatile("fildll (%0,%2,8); fistpll (%1,%2,8)"
                       :
                       : "r" (from), "r" (to), "r" (count)
                       : "memory" );
    }
  }
#endif // AMD64
}

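Aside: the deleted fildll/fistpll loops existed because 32-bit x86 had no general 64-bit move, and they had to pick a copy direction so overlapping ranges stayed correct. A sketch of that direction logic with ordinary 64-bit accesses (illustrative only, not the _Copy_conjoint_jlongs_atomic stub):

    #include <cstdint>
    #include <cstddef>

    // Copy 64-bit elements one whole word at a time (an aligned int64_t
    // load/store is atomic on x86-64), choosing the direction that stays
    // correct when the source and destination ranges overlap.
    void conjoint_jlongs(const volatile int64_t* from, volatile int64_t* to,
                         size_t count) {
      if (from > to) {                      // destination below source: copy forward
        for (size_t i = 0; i < count; i++) {
          to[i] = from[i];
        }
      } else {                              // destination above source: copy backward
        while (count-- > 0) {
          to[count] = from[count];
        }
      }
    }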
static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) {
#ifdef AMD64
  assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
  _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#else
  assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size");
  // pd_conjoint_words is word-atomic in this implementation.
  pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#endif // AMD64
}

static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) {
@@ -307,28 +120,16 @@ static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size
}

static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
  _Copy_arrayof_conjoint_jints(from, to, count);
#else
  pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count);
#endif // AMD64
}

static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
  _Copy_arrayof_conjoint_jlongs(from, to, count);
#else
  pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count);
#endif // AMD64
}

static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
  assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
  _Copy_arrayof_conjoint_jlongs(from, to, count);
#else
  pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count);
#endif // AMD64
}

#endif // _WINDOWS

@@ -536,14 +536,9 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
      // QQQ seems like this code is equivalent on the two platforms
#ifdef AMD64
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
@@ -569,19 +564,7 @@ BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result)
    case T_SHORT  : value_result->s = *(jshort*)tos_addr; break;
    case T_INT    : value_result->i = *(jint*)tos_addr; break;
    case T_LONG   : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT  : {
#ifdef AMD64
        value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        jdouble d = *(jdouble*)tos_addr;  // Result was in ST0 so need to convert to jfloat
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_FLOAT  : value_result->f = *(jfloat*)tos_addr; break;
    case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID   : /* Nothing to do */ break;
    default       : ShouldNotReachHere();
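Aside: the deleted T_FLOAT branch narrowed an x87 double result (spilled from ST0) back to jfloat; on x86-64 the float result lives in XMM0 and is read directly. An illustration of the old conversion in plain C++ (hypothetical standalone helper, memcpy used for safe type punning):

    #include <cstring>

    // 32-bit-era read of a native jfloat result: the value was spilled
    // from ST0 as a double, so it had to be narrowed explicitly.
    float read_float_result(const void* tos_addr, bool is_native) {
      if (is_native) {
        double d;
        std::memcpy(&d, tos_addr, sizeof d);  // result stored as double
        return (float)d;                      // narrow back to float
      }
      float f;
      std::memcpy(&f, tos_addr, sizeof f);    // already a float
      return f;
    }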
@@ -611,7 +594,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
#ifdef AMD64
  } else if (is_entry_frame()) {
    // This could be more descriptive if we use the enum in
    // stubGenerator to map to real names but it's most important to
@@ -619,7 +601,6 @@ void frame::describe_pd(FrameValues& values, int frame_no) {
    for (int i = 0; i < entry_frame_after_call_words; i++) {
      values.describe(frame_no, fp() - i, err_msg("call_stub word fp - %d", i));
    }
#endif // AMD64
  }

  if (is_java_frame() || Continuation::is_continuation_enterSpecial(*this)) {

@@ -80,8 +80,7 @@
  interpreter_frame_monitor_block_bottom_offset = interpreter_frame_initial_sp_offset,

  // Entry frames
#ifdef AMD64
#ifdef _WIN64
#ifdef _WINDOWS
  entry_frame_after_call_words = 28,
  entry_frame_call_wrapper_offset = 2,

@@ -91,10 +90,7 @@
  entry_frame_call_wrapper_offset = -6,

  arg_reg_save_area_bytes = 0,
#endif // _WIN64
#else
  entry_frame_call_wrapper_offset = 2,
#endif // AMD64
#endif // _WINDOWS

  // size, in words, of frame metadata (e.g. pc and link)
  metadata_words = sender_sp_offset,

@@ -483,7 +483,6 @@ void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr)
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code, on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // this is weird "H" ought to be at a higher address however the
  // oopMaps seems to have the "H" regs at the same address and the
  // vanilla register.
@@ -491,6 +490,5 @@ void frame::update_map_with_saved_link(RegisterMapT* map, intptr_t** link_addr)
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
#endif // CPU_X86_FRAME_X86_INLINE_HPP

@@ -518,8 +518,8 @@ void InterpreterMacroAssembler::load_resolved_klass_at_index(Register klass,
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  assert(Rsub_klass != rax, "rax holds superklass");
  LP64_ONLY(assert(Rsub_klass != r14, "r14 holds locals");)
  LP64_ONLY(assert(Rsub_klass != r13, "r13 holds bcp");)
  assert(Rsub_klass != r14, "r14 holds locals");
  assert(Rsub_klass != r13, "r13 holds bcp");
  assert(Rsub_klass != rcx, "rcx holds 2ndary super array length");
  assert(Rsub_klass != rdi, "rdi holds 2ndary super array scan ptr");


@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,22 +42,12 @@ class JNITypes : AllStatic {
  // reverse the argument list constructed by JavaCallArguments (see
  // javaCalls.hpp).

private:

#ifndef AMD64
  // 32bit Helper routines.
  static inline void put_int2r(jint *from, intptr_t *to)           { *(jint *)(to++) = from[1];
                                                                     *(jint *)(to  ) = from[0]; }
  static inline void put_int2r(jint *from, intptr_t *to, int& pos) { put_int2r(from, to + pos); pos += 2; }
#endif // AMD64

public:
  // Ints are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_int(jint  from, intptr_t *to)           { *(jint *)(to +   0  ) =  from; }
  static inline void put_int(jint  from, intptr_t *to, int& pos) { *(jint *)(to + pos++) =  from; }
  static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }

#ifdef AMD64
  // Longs are stored in native format in one JavaCallArgument slot at
  // *(to+1).
  static inline void put_long(jlong from, intptr_t *to) {
@@ -73,13 +63,6 @@ public:
    *(jlong*) (to + 1 + pos) = *from;
    pos += 2;
  }
#else
  // Longs are stored in big-endian word format in two JavaCallArgument slots at *to.
  // The high half is in *to and the low half in *(to+1).
  static inline void put_long(jlong  from, intptr_t *to)           { put_int2r((jint *)&from, to); }
  static inline void put_long(jlong  from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
  static inline void put_long(jlong *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
#endif // AMD64

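Aside: the removed put_int2r helper split a 64-bit value across two 32-bit JavaCallArgument slots, high half first. A worked standalone example of that layout (hypothetical values and helper name):

    #include <cstdint>

    // 32-bit-era layout: jlong 0x1122334455667788 occupies two slots,
    // big-endian by word, matching put_int2r above:
    //   slot[0] = 0x11223344   // high half, *to
    //   slot[1] = 0x55667788   // low half,  *(to+1)
    void put_long_2slots(int64_t from, uint32_t* to) {
      to[0] = (uint32_t)((uint64_t)from >> 32);  // high word first
      to[1] = (uint32_t)from;                    // then low word
    }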
  // Oops are stored in native format in one JavaCallArgument slot at *to.
  static inline void put_obj(const Handle& from_handle, intptr_t *to, int& pos) { *(to + pos++) = (intptr_t)from_handle.raw_value(); }
@@ -91,7 +74,6 @@ public:
  static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }

#undef _JNI_SLOT_OFFSET
#ifdef AMD64
#define _JNI_SLOT_OFFSET 1
  // Doubles are stored in native word format in one JavaCallArgument
  // slot at *(to+1).
@@ -108,14 +90,6 @@ public:
    *(jdouble*) (to + 1 + pos) = *from;
    pos += 2;
  }
#else
#define _JNI_SLOT_OFFSET 0
  // Doubles are stored in big-endian word format in two JavaCallArgument slots at *to.
  // The high half is in *to and the low half in *(to+1).
  static inline void put_double(jdouble  from, intptr_t *to)           { put_int2r((jint *)&from, to); }
  static inline void put_double(jdouble  from, intptr_t *to, int& pos) { put_int2r((jint *)&from, to, pos); }
  static inline void put_double(jdouble *from, intptr_t *to, int& pos) { put_int2r((jint *) from, to, pos); }
#endif // AMD64


  // The get_xxx routines, on the other hand, actually _do_ fetch

@@ -77,14 +77,10 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& obj, bool compre
  address pc = _instructions->start() + pc_offset;
  jobject value = JNIHandles::make_local(obj());
  if (compressed) {
#ifdef _LP64
    address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
    int oop_index = _oop_recorder->find_index(value);
    _instructions->relocate(pc, oop_Relocation::spec(oop_index), Assembler::narrow_oop_operand);
    JVMCI_event_3("relocating (narrow oop constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
    JVMCI_ERROR("compressed oop on 32bit");
#endif
  } else {
    address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
    *((jobject*) operand) = value;
@@ -96,13 +92,9 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, Handle& obj, bool compre
void CodeInstaller::pd_patch_MetaspaceConstant(int pc_offset, HotSpotCompiledCodeStream* stream, u1 tag, JVMCI_TRAPS) {
  address pc = _instructions->start() + pc_offset;
  if (tag == PATCH_NARROW_KLASS) {
#ifdef _LP64
    address operand = Assembler::locate_operand(pc, Assembler::narrow_oop_operand);
    *((narrowKlass*) operand) = record_narrow_metadata_reference(_instructions, operand, stream, tag, JVMCI_CHECK);
    JVMCI_event_3("relocating (narrow metaspace constant) at " PTR_FORMAT "/" PTR_FORMAT, p2i(pc), p2i(operand));
#else
    JVMCI_ERROR("compressed Klass* on 32bit");
#endif
  } else {
    address operand = Assembler::locate_operand(pc, Assembler::imm_operand);
    *((void**) operand) = record_metadata_reference(_instructions, operand, stream, tag, JVMCI_CHECK);

@@ -561,7 +561,6 @@ void trace_method_handle_stub(const char* adaptername,
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
#ifdef AMD64
      int num_regs = UseAPX ? 32 : 16;
      assert(Register::available_gp_registers() == num_regs, "sanity");
      if (r == rsp) {
@@ -570,9 +569,6 @@ void trace_method_handle_stub(const char* adaptername,
      } else {
        ls.print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      }
#else
      ls.print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
#endif
      if ((i + 1) % 4 == 0) {
        ls.cr();
      } else {

@@ -36,7 +36,6 @@


void Relocation::pd_set_data_value(address x, bool verify_only) {
#ifdef AMD64
  typedef Assembler::WhichOperand WhichOperand;
  WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop
  assert(which == Assembler::disp32_operand ||
@@ -76,13 +75,6 @@ void Relocation::pd_set_data_value(address x, bool verify_only) {
      *(int32_t*) disp = checked_cast<int32_t>(x - next_ip);
    }
  }
#else
  if (verify_only) {
    guarantee(*pd_address_in_code() == x, "instructions must match");
  } else {
    *pd_address_in_code() = x;
  }
#endif // AMD64
}


@@ -150,22 +142,17 @@ address* Relocation::pd_address_in_code() {
  assert(is_data(), "must be a DataRelocation");
  typedef Assembler::WhichOperand WhichOperand;
  WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
#ifdef AMD64
  assert(which == Assembler::disp32_operand ||
         which == Assembler::call32_operand ||
         which == Assembler::imm_operand, "format unpacks ok");
  // The "address" in the code is a displacement can't return it as
  // and address* since it is really a jint*
  guarantee(which == Assembler::imm_operand, "must be immediate operand");
#else
  assert(which == Assembler::disp32_operand || which == Assembler::imm_operand, "format unpacks ok");
#endif // AMD64
  return (address*) Assembler::locate_operand(addr(), which);
}


address Relocation::pd_get_address_from_code() {
#ifdef AMD64
  // All embedded Intel addresses are stored in 32-bit words.
  // Since the addr points at the start of the instruction,
  // we must parse the instruction a bit to find the embedded word.
@@ -182,7 +169,6 @@ address Relocation::pd_get_address_from_code() {
    address a = next_ip + *(int32_t*) disp;
    return a;
  }
#endif // AMD64
  return *pd_address_in_code();
}


@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,8 @@
  offset_unit = 1,

  // Encodes Assembler::disp32_operand vs. Assembler::imm32_operand.
#ifndef AMD64
  format_width = 1
#else
  // vs Assembler::narrow_oop_operand and ZGC barrier encodings.
  format_width = 3
#endif
};

public:

@@ -1260,13 +1260,11 @@ void VM_Version::get_processor_features() {

  // Kyber Intrinsics
  // Currently we only have them for AVX512
#ifdef _LP64
  if (supports_evex() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) {
      UseKyberIntrinsics = true;
    }
  } else
#endif
  if (UseKyberIntrinsics) {
    warning("Intrinsics for ML-KEM are not available on this CPU.");
    FLAG_SET_DEFAULT(UseKyberIntrinsics, false);

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -93,7 +93,6 @@ inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
  return exchange_value;
}

#ifdef AMD64
template<>
template<typename D, typename I>
inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
@@ -135,51 +134,6 @@ inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                      T compare_value,
                                                      T exchange_value,
                                                      atomic_memory_order /* order */) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

// No direct support for 8-byte xchg; emulate using cmpxchg.
template<>
struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {};

// No direct support for 8-byte add; emulate using cmpxchg.
template<>
struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {};

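Aside: XchgUsingCmpxchg/AddUsingCmpxchg express the standard emulation of an unconditional 8-byte read-modify-write on top of compare-and-swap. Its usual shape, sketched with C++11 atomics rather than the HotSpot templates:

    #include <atomic>
    #include <cstdint>

    // Emulate an unconditional 64-bit exchange using only CAS:
    // retry until the CAS succeeds against the currently stored value.
    int64_t xchg_using_cmpxchg(std::atomic<int64_t>& dest, int64_t new_value) {
      int64_t old_value = dest.load(std::memory_order_relaxed);
      while (!dest.compare_exchange_weak(old_value, new_value)) {
        // old_value was refreshed by the failed CAS; just retry.
      }
      return old_value;
    }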
template<>
template<typename T>
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
                                                       T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

template<>
struct AtomicAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
@@ -216,7 +170,6 @@ struct AtomicAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
  }
};

#ifdef AMD64
template<>
struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
@@ -228,6 +181,5 @@ struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
                  : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_BSD_X86_ATOMICACCESS_BSD_X86_HPP

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -29,19 +29,9 @@
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)
//
#ifdef AMD64
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize,         1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize,       1024);
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high.  System
// default ThreadStackSize appears to be 512 which is too big.
define_pd_global(intx, ThreadStackSize,         320);
define_pd_global(intx, VMThreadStackSize,       512);
#endif // AMD64


define_pd_global(size_t, JVMInvokeMethodSlack, 8192);


@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -51,11 +51,7 @@ inline void OrderAccess::release() { compiler_barrier(); }

inline void OrderAccess::fence() {
  // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  compiler_barrier();
}


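Aside: the retained fence issues a locked add of zero to the word at the stack pointer; any locked read-modify-write is a full memory barrier on x86 and is typically cheaper than mfence, which is what the kept comment alludes to. Standalone form of the surviving 64-bit variant (GCC inline assembly, x86-64 only):

    // Full memory fence via a locked no-op RMW on the stack slot at %rsp.
    // Equivalent in ordering strength to mfence, usually cheaper.
    inline void fence_via_locked_add() {
      __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    }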
@ -86,56 +86,34 @@
|
||||
# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13
|
||||
#endif
|
||||
|
||||
#ifdef AMD64
|
||||
#define SPELL_REG_SP "rsp"
|
||||
#define SPELL_REG_FP "rbp"
|
||||
#define REG_BCP context_r13
|
||||
#else
|
||||
#define SPELL_REG_SP "esp"
|
||||
#define SPELL_REG_FP "ebp"
|
||||
#endif // AMD64
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
# define context_trapno uc_mcontext.mc_trapno
|
||||
# ifdef AMD64
|
||||
# define context_pc uc_mcontext.mc_rip
|
||||
# define context_sp uc_mcontext.mc_rsp
|
||||
# define context_fp uc_mcontext.mc_rbp
|
||||
# define context_rip uc_mcontext.mc_rip
|
||||
# define context_rsp uc_mcontext.mc_rsp
|
||||
# define context_rbp uc_mcontext.mc_rbp
|
||||
# define context_rax uc_mcontext.mc_rax
|
||||
# define context_rbx uc_mcontext.mc_rbx
|
||||
# define context_rcx uc_mcontext.mc_rcx
|
||||
# define context_rdx uc_mcontext.mc_rdx
|
||||
# define context_rsi uc_mcontext.mc_rsi
|
||||
# define context_rdi uc_mcontext.mc_rdi
|
||||
# define context_r8 uc_mcontext.mc_r8
|
||||
# define context_r9 uc_mcontext.mc_r9
|
||||
# define context_r10 uc_mcontext.mc_r10
|
||||
# define context_r11 uc_mcontext.mc_r11
|
||||
# define context_r12 uc_mcontext.mc_r12
|
||||
# define context_r13 uc_mcontext.mc_r13
|
||||
# define context_r14 uc_mcontext.mc_r14
|
||||
# define context_r15 uc_mcontext.mc_r15
|
||||
# define context_flags uc_mcontext.mc_flags
|
||||
# define context_err uc_mcontext.mc_err
|
||||
# else
|
||||
# define context_pc uc_mcontext.mc_eip
|
||||
# define context_sp uc_mcontext.mc_esp
|
||||
# define context_fp uc_mcontext.mc_ebp
|
||||
# define context_eip uc_mcontext.mc_eip
|
||||
# define context_esp uc_mcontext.mc_esp
|
||||
# define context_eax uc_mcontext.mc_eax
|
||||
# define context_ebx uc_mcontext.mc_ebx
|
||||
# define context_ecx uc_mcontext.mc_ecx
|
||||
# define context_edx uc_mcontext.mc_edx
|
||||
# define context_ebp uc_mcontext.mc_ebp
|
||||
# define context_esi uc_mcontext.mc_esi
|
||||
# define context_edi uc_mcontext.mc_edi
|
||||
# define context_eflags uc_mcontext.mc_eflags
|
||||
# define context_trapno uc_mcontext.mc_trapno
|
||||
# endif
|
||||
# define context_pc uc_mcontext.mc_rip
|
||||
# define context_sp uc_mcontext.mc_rsp
|
||||
# define context_fp uc_mcontext.mc_rbp
|
||||
# define context_rip uc_mcontext.mc_rip
|
||||
# define context_rsp uc_mcontext.mc_rsp
|
||||
# define context_rbp uc_mcontext.mc_rbp
|
||||
# define context_rax uc_mcontext.mc_rax
|
||||
# define context_rbx uc_mcontext.mc_rbx
|
||||
# define context_rcx uc_mcontext.mc_rcx
|
||||
# define context_rdx uc_mcontext.mc_rdx
|
||||
# define context_rsi uc_mcontext.mc_rsi
|
||||
# define context_rdi uc_mcontext.mc_rdi
|
||||
# define context_r8 uc_mcontext.mc_r8
|
||||
# define context_r9 uc_mcontext.mc_r9
|
||||
# define context_r10 uc_mcontext.mc_r10
|
||||
# define context_r11 uc_mcontext.mc_r11
|
||||
# define context_r12 uc_mcontext.mc_r12
|
||||
# define context_r13 uc_mcontext.mc_r13
|
||||
# define context_r14 uc_mcontext.mc_r14
|
||||
# define context_r15 uc_mcontext.mc_r15
|
||||
# define context_flags uc_mcontext.mc_flags
|
||||
# define context_err uc_mcontext.mc_err
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
@ -146,133 +124,82 @@
|
||||
#define DU3_PREFIX(s, m) s ## . ## m
|
||||
# endif
|
||||
|
||||
# ifdef AMD64
|
||||
# define context_pc context_rip
|
||||
# define context_sp context_rsp
|
||||
# define context_fp context_rbp
|
||||
# define context_rip uc_mcontext->DU3_PREFIX(ss,rip)
|
||||
# define context_rsp uc_mcontext->DU3_PREFIX(ss,rsp)
|
||||
# define context_rax uc_mcontext->DU3_PREFIX(ss,rax)
|
||||
# define context_rbx uc_mcontext->DU3_PREFIX(ss,rbx)
|
||||
# define context_rcx uc_mcontext->DU3_PREFIX(ss,rcx)
|
||||
# define context_rdx uc_mcontext->DU3_PREFIX(ss,rdx)
|
||||
# define context_rbp uc_mcontext->DU3_PREFIX(ss,rbp)
|
||||
# define context_rsi uc_mcontext->DU3_PREFIX(ss,rsi)
|
||||
# define context_rdi uc_mcontext->DU3_PREFIX(ss,rdi)
|
||||
# define context_r8 uc_mcontext->DU3_PREFIX(ss,r8)
|
||||
# define context_r9 uc_mcontext->DU3_PREFIX(ss,r9)
|
||||
# define context_r10 uc_mcontext->DU3_PREFIX(ss,r10)
|
||||
# define context_r11 uc_mcontext->DU3_PREFIX(ss,r11)
|
||||
# define context_r12 uc_mcontext->DU3_PREFIX(ss,r12)
|
||||
# define context_r13 uc_mcontext->DU3_PREFIX(ss,r13)
|
||||
# define context_r14 uc_mcontext->DU3_PREFIX(ss,r14)
|
||||
# define context_r15 uc_mcontext->DU3_PREFIX(ss,r15)
|
||||
# define context_flags uc_mcontext->DU3_PREFIX(ss,rflags)
|
||||
# define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
|
||||
# define context_err uc_mcontext->DU3_PREFIX(es,err)
|
||||
# else
|
||||
# define context_pc context_eip
|
||||
# define context_sp context_esp
|
||||
# define context_fp context_ebp
|
||||
# define context_eip uc_mcontext->DU3_PREFIX(ss,eip)
|
||||
# define context_esp uc_mcontext->DU3_PREFIX(ss,esp)
|
||||
# define context_eax uc_mcontext->DU3_PREFIX(ss,eax)
|
||||
# define context_ebx uc_mcontext->DU3_PREFIX(ss,ebx)
|
||||
# define context_ecx uc_mcontext->DU3_PREFIX(ss,ecx)
|
||||
# define context_edx uc_mcontext->DU3_PREFIX(ss,edx)
|
||||
# define context_ebp uc_mcontext->DU3_PREFIX(ss,ebp)
|
||||
# define context_esi uc_mcontext->DU3_PREFIX(ss,esi)
|
||||
# define context_edi uc_mcontext->DU3_PREFIX(ss,edi)
|
||||
# define context_eflags uc_mcontext->DU3_PREFIX(ss,eflags)
|
||||
# define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
|
||||
# endif
|
||||
# define context_pc context_rip
|
||||
# define context_sp context_rsp
|
||||
# define context_fp context_rbp
|
||||
# define context_rip uc_mcontext->DU3_PREFIX(ss,rip)
|
||||
# define context_rsp uc_mcontext->DU3_PREFIX(ss,rsp)
|
||||
# define context_rax uc_mcontext->DU3_PREFIX(ss,rax)
|
||||
# define context_rbx uc_mcontext->DU3_PREFIX(ss,rbx)
|
||||
# define context_rcx uc_mcontext->DU3_PREFIX(ss,rcx)
|
||||
# define context_rdx uc_mcontext->DU3_PREFIX(ss,rdx)
|
||||
# define context_rbp uc_mcontext->DU3_PREFIX(ss,rbp)
|
||||
# define context_rsi uc_mcontext->DU3_PREFIX(ss,rsi)
|
||||
# define context_rdi uc_mcontext->DU3_PREFIX(ss,rdi)
|
||||
# define context_r8 uc_mcontext->DU3_PREFIX(ss,r8)
|
||||
# define context_r9 uc_mcontext->DU3_PREFIX(ss,r9)
|
||||
# define context_r10 uc_mcontext->DU3_PREFIX(ss,r10)
|
||||
# define context_r11 uc_mcontext->DU3_PREFIX(ss,r11)
|
||||
# define context_r12 uc_mcontext->DU3_PREFIX(ss,r12)
|
||||
# define context_r13 uc_mcontext->DU3_PREFIX(ss,r13)
|
||||
# define context_r14 uc_mcontext->DU3_PREFIX(ss,r14)
|
||||
# define context_r15 uc_mcontext->DU3_PREFIX(ss,r15)
|
||||
# define context_flags uc_mcontext->DU3_PREFIX(ss,rflags)
|
||||
# define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
|
||||
# define context_err uc_mcontext->DU3_PREFIX(es,err)
|
||||
#endif
|
||||
|
||||
#ifdef __OpenBSD__
|
||||
# define context_trapno sc_trapno
|
||||
# ifdef AMD64
|
||||
# define context_pc sc_rip
|
||||
# define context_sp sc_rsp
|
||||
# define context_fp sc_rbp
|
||||
# define context_rip sc_rip
|
||||
# define context_rsp sc_rsp
|
||||
# define context_rbp sc_rbp
|
||||
# define context_rax sc_rax
|
||||
# define context_rbx sc_rbx
|
||||
# define context_rcx sc_rcx
|
||||
# define context_rdx sc_rdx
|
||||
# define context_rsi sc_rsi
|
||||
# define context_rdi sc_rdi
|
||||
# define context_r8 sc_r8
|
||||
# define context_r9 sc_r9
|
||||
# define context_r10 sc_r10
|
||||
# define context_r11 sc_r11
|
||||
# define context_r12 sc_r12
|
||||
# define context_r13 sc_r13
|
||||
# define context_r14 sc_r14
|
||||
# define context_r15 sc_r15
|
||||
# define context_flags sc_rflags
|
||||
# define context_err sc_err
|
||||
# else
|
||||
# define context_pc sc_eip
|
||||
# define context_sp sc_esp
|
||||
# define context_fp sc_ebp
|
||||
# define context_eip sc_eip
|
||||
# define context_esp sc_esp
|
||||
# define context_eax sc_eax
|
||||
# define context_ebx sc_ebx
|
||||
# define context_ecx sc_ecx
|
||||
# define context_edx sc_edx
|
||||
# define context_ebp sc_ebp
|
||||
# define context_esi sc_esi
|
||||
# define context_edi sc_edi
|
||||
# define context_eflags sc_eflags
|
||||
# define context_trapno sc_trapno
|
||||
# endif
|
||||
# define context_pc sc_rip
|
||||
# define context_sp sc_rsp
|
||||
# define context_fp sc_rbp
|
||||
# define context_rip sc_rip
|
||||
# define context_rsp sc_rsp
|
||||
# define context_rbp sc_rbp
|
||||
# define context_rax sc_rax
|
||||
# define context_rbx sc_rbx
|
||||
# define context_rcx sc_rcx
|
||||
# define context_rdx sc_rdx
|
||||
# define context_rsi sc_rsi
|
||||
# define context_rdi sc_rdi
|
||||
# define context_r8 sc_r8
|
||||
# define context_r9 sc_r9
|
||||
# define context_r10 sc_r10
|
||||
# define context_r11 sc_r11
|
||||
# define context_r12 sc_r12
|
||||
# define context_r13 sc_r13
|
||||
# define context_r14 sc_r14
|
||||
# define context_r15 sc_r15
|
||||
# define context_flags sc_rflags
|
||||
# define context_err sc_err
|
||||
#endif
|
||||
|
||||
#ifdef __NetBSD__
|
||||
# define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
|
||||
# ifdef AMD64
|
||||
# define __register_t __greg_t
|
||||
# define context_pc uc_mcontext.__gregs[_REG_RIP]
|
||||
# define context_sp uc_mcontext.__gregs[_REG_URSP]
|
||||
# define context_fp uc_mcontext.__gregs[_REG_RBP]
|
||||
# define context_rip uc_mcontext.__gregs[_REG_RIP]
|
||||
# define context_rsp uc_mcontext.__gregs[_REG_URSP]
|
||||
# define context_rax uc_mcontext.__gregs[_REG_RAX]
|
||||
# define context_rbx uc_mcontext.__gregs[_REG_RBX]
|
||||
# define context_rcx uc_mcontext.__gregs[_REG_RCX]
|
||||
# define context_rdx uc_mcontext.__gregs[_REG_RDX]
|
||||
# define context_rbp uc_mcontext.__gregs[_REG_RBP]
|
||||
# define context_rsi uc_mcontext.__gregs[_REG_RSI]
|
||||
# define context_rdi uc_mcontext.__gregs[_REG_RDI]
|
||||
# define context_r8 uc_mcontext.__gregs[_REG_R8]
|
||||
# define context_r9 uc_mcontext.__gregs[_REG_R9]
|
||||
# define context_r10 uc_mcontext.__gregs[_REG_R10]
|
||||
# define context_r11 uc_mcontext.__gregs[_REG_R11]
|
||||
# define context_r12 uc_mcontext.__gregs[_REG_R12]
|
||||
# define context_r13 uc_mcontext.__gregs[_REG_R13]
|
||||
# define context_r14 uc_mcontext.__gregs[_REG_R14]
|
||||
# define context_r15 uc_mcontext.__gregs[_REG_R15]
|
||||
# define context_flags uc_mcontext.__gregs[_REG_RFL]
|
||||
# define context_err uc_mcontext.__gregs[_REG_ERR]
|
||||
# else
|
||||
# define context_pc uc_mcontext.__gregs[_REG_EIP]
|
||||
# define context_sp uc_mcontext.__gregs[_REG_UESP]
|
||||
# define context_fp uc_mcontext.__gregs[_REG_EBP]
|
||||
# define context_eip uc_mcontext.__gregs[_REG_EIP]
|
||||
# define context_esp uc_mcontext.__gregs[_REG_UESP]
|
||||
# define context_eax uc_mcontext.__gregs[_REG_EAX]
|
||||
# define context_ebx uc_mcontext.__gregs[_REG_EBX]
|
||||
# define context_ecx uc_mcontext.__gregs[_REG_ECX]
|
||||
# define context_edx uc_mcontext.__gregs[_REG_EDX]
|
||||
# define context_ebp uc_mcontext.__gregs[_REG_EBP]
|
||||
# define context_esi uc_mcontext.__gregs[_REG_ESI]
|
||||
# define context_edi uc_mcontext.__gregs[_REG_EDI]
|
||||
# define context_eflags uc_mcontext.__gregs[_REG_EFL]
|
||||
# define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
|
||||
# endif
|
||||
# define __register_t __greg_t
|
||||
# define context_pc uc_mcontext.__gregs[_REG_RIP]
|
||||
# define context_sp uc_mcontext.__gregs[_REG_URSP]
|
||||
# define context_fp uc_mcontext.__gregs[_REG_RBP]
|
||||
# define context_rip uc_mcontext.__gregs[_REG_RIP]
|
||||
# define context_rsp uc_mcontext.__gregs[_REG_URSP]
|
||||
# define context_rax uc_mcontext.__gregs[_REG_RAX]
|
||||
# define context_rbx uc_mcontext.__gregs[_REG_RBX]
|
||||
# define context_rcx uc_mcontext.__gregs[_REG_RCX]
|
||||
# define context_rdx uc_mcontext.__gregs[_REG_RDX]
|
||||
# define context_rbp uc_mcontext.__gregs[_REG_RBP]
|
||||
# define context_rsi uc_mcontext.__gregs[_REG_RSI]
|
||||
# define context_rdi uc_mcontext.__gregs[_REG_RDI]
|
||||
# define context_r8 uc_mcontext.__gregs[_REG_R8]
|
||||
# define context_r9 uc_mcontext.__gregs[_REG_R9]
|
||||
# define context_r10 uc_mcontext.__gregs[_REG_R10]
|
||||
# define context_r11 uc_mcontext.__gregs[_REG_R11]
|
||||
# define context_r12 uc_mcontext.__gregs[_REG_R12]
|
||||
# define context_r13 uc_mcontext.__gregs[_REG_R13]
|
||||
# define context_r14 uc_mcontext.__gregs[_REG_R14]
|
||||
# define context_r15 uc_mcontext.__gregs[_REG_R15]
|
||||
# define context_flags uc_mcontext.__gregs[_REG_RFL]
|
||||
# define context_err uc_mcontext.__gregs[_REG_ERR]
|
||||
#endif
|
||||
|
||||
address os::current_stack_pointer() {
|
||||
@ -468,13 +395,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
}
|
||||
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
|
||||
}
|
||||
} else
|
||||
#ifdef AMD64
|
||||
if (sig == SIGFPE &&
|
||||
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV
|
||||
// Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0"
|
||||
// instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation
|
||||
MACOS_ONLY(|| (VM_Version::is_cpu_emulated() && info->si_code == FPE_FLTINV)))) {
|
||||
} else if (sig == SIGFPE &&
|
||||
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV
|
||||
// Workaround for macOS ARM incorrectly reporting FPE_FLTINV for "div by 0"
|
||||
// instead of the expected FPE_FLTDIV when running x86_64 binary under Rosetta emulation
|
||||
MACOS_ONLY(|| (VM_Version::is_cpu_emulated() && info->si_code == FPE_FLTINV)))) {
|
||||
stub =
|
||||
SharedRuntime::
|
||||
continuation_for_implicit_exception(thread,
|
||||
@ -502,34 +427,6 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
fatal("please update this code.");
|
||||
}
|
||||
#endif /* __APPLE__ */
|
||||
|
||||
#else
|
||||
if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
|
||||
// HACK: si_code does not work on bsd 2.2.12-20!!!
|
||||
int op = pc[0];
|
||||
if (op == 0xDB) {
|
||||
// FIST
|
||||
// TODO: The encoding of D2I in x86_32.ad can cause an exception
|
||||
// prior to the fist instruction if there was an invalid operation
|
||||
// pending. We want to dismiss that exception. From the win_32
|
||||
// side it also seems that if it really was the fist causing
|
||||
// the exception that we do the d2i by hand with different
|
||||
// rounding. Seems kind of weird.
|
||||
// NOTE: that we take the exception at the NEXT floating point instruction.
|
||||
assert(pc[0] == 0xDB, "not a FIST opcode");
|
||||
assert(pc[1] == 0x14, "not a FIST opcode");
|
||||
assert(pc[2] == 0x24, "not a FIST opcode");
|
||||
return true;
|
||||
} else if (op == 0xF7) {
|
||||
// IDIV
|
||||
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
|
||||
} else {
|
||||
// TODO: handle more cases if we are using other x86 instructions
|
||||
// that can generate SIGFPE signal on bsd.
|
||||
tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
|
||||
fatal("please update this code.");
|
||||
}
|
||||
#endif // AMD64
|
||||
} else if ((sig == SIGSEGV || sig == SIGBUS) &&
|
||||
MacroAssembler::uses_implicit_null_check(info->si_addr)) {
|
||||
// Determination of interpreter/vtable stub/compiled code null exception
|
||||
@ -556,81 +453,6 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef AMD64
|
||||
// Execution protection violation
|
||||
//
|
||||
// This should be kept as the last step in the triage. We don't
|
||||
// have a dedicated trap number for a no-execute fault, so be
|
||||
// conservative and allow other handlers the first shot.
|
||||
//
|
||||
// Note: We don't test that info->si_code == SEGV_ACCERR here.
|
||||
// this si_code is so generic that it is almost meaningless; and
|
||||
// the si_code for this condition may change in the future.
|
||||
// Furthermore, a false-positive should be harmless.
|
||||
if (UnguardOnExecutionViolation > 0 &&
|
||||
stub == nullptr &&
|
||||
(sig == SIGSEGV || sig == SIGBUS) &&
|
||||
uc->context_trapno == trap_page_fault) {
|
||||
size_t page_size = os::vm_page_size();
|
||||
address addr = (address) info->si_addr;
|
||||
address pc = os::Posix::ucontext_get_pc(uc);
|
||||
// Make sure the pc and the faulting address are sane.
|
||||
//
|
||||
// If an instruction spans a page boundary, and the page containing
|
||||
// the beginning of the instruction is executable but the following
|
||||
// page is not, the pc and the faulting address might be slightly
|
||||
// different - we still want to unguard the 2nd page in this case.
|
||||
//
|
||||
// 15 bytes seems to be a (very) safe value for max instruction size.
|
||||
bool pc_is_near_addr =
|
||||
(pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
|
||||
bool instr_spans_page_boundary =
|
||||
(align_down((intptr_t) pc ^ (intptr_t) addr,
|
||||
(intptr_t) page_size) > 0);
|
||||
|
||||
if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
|
||||
static volatile address last_addr =
|
||||
(address) os::non_memory_address_word();
|
||||
|
||||
// In conservative mode, don't unguard unless the address is in the VM
|
||||
if (addr != last_addr &&
|
||||
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
|
||||
|
||||
// Set memory to RWX and retry
|
||||
address page_start = align_down(addr, page_size);
|
||||
bool res = os::protect_memory((char*) page_start, page_size,
|
||||
os::MEM_PROT_RWX);
|
||||
|
||||
log_debug(os)("Execution protection violation "
|
||||
"at " INTPTR_FORMAT
|
||||
", unguarding " INTPTR_FORMAT ": %s, errno=%d", p2i(addr),
|
||||
p2i(page_start), (res ? "success" : "failed"), errno);
|
||||
stub = pc;
|
||||
|
||||
// Set last_addr so if we fault again at the same address, we don't end
|
||||
// up in an endless loop.
|
||||
//
|
||||
// There are two potential complications here. Two threads trapping at
|
||||
// the same address at the same time could cause one of the threads to
|
||||
// think it already unguarded, and abort the VM. Likely very rare.
|
||||
//
|
||||
// The other race involves two threads alternately trapping at
|
||||
// different addresses and failing to unguard the page, resulting in
|
||||
// an endless loop. This condition is probably even more unlikely than
|
||||
// the first.
|
||||
//
|
||||
// Although both cases could be avoided by using locks or thread local
|
||||
// last_addr, these solutions are unnecessary complication: this
|
||||
// handler is a best-effort safety net, not a complete solution. It is
|
||||
// disabled by default and should only be used as a workaround in case
|
||||
// we missed any no-execute-unsafe VM code.
|
||||
|
||||
last_addr = addr;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif // !AMD64
|
||||
|
||||
if (stub != nullptr) {
|
||||
// save all thread context in case we need to restore it
|
||||
if (thread != nullptr) thread->set_saved_exception_pc(pc);
|
||||
@ -646,10 +468,6 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
extern "C" void fixcw();
|
||||
|
||||
void os::Bsd::init_thread_fpu_state(void) {
|
||||
#ifndef AMD64
|
||||
// Set fpu to 53 bit precision. This happens too early to use a stub.
|
||||
fixcw();
|
||||
#endif // !AMD64
|
||||
}
|
||||
|
||||
juint os::cpu_microcode_revision() {
|
||||
@ -671,26 +489,12 @@ juint os::cpu_microcode_revision() {
|
||||
// HotSpot guard pages is added later.
|
||||
size_t os::_compiler_thread_min_stack_allowed = 48 * K;
|
||||
size_t os::_java_thread_min_stack_allowed = 48 * K;
|
||||
#ifdef _LP64
|
||||
size_t os::_vm_internal_thread_min_stack_allowed = 64 * K;
|
||||
#else
|
||||
size_t os::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
|
||||
#endif // _LP64
|
||||
|
||||
#ifndef AMD64
|
||||
#ifdef __GNUC__
|
||||
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
|
||||
#endif
|
||||
#endif // AMD64
|
||||
|
||||
// return default stack size for thr_type
|
||||
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
|
||||
// default stack size (compiler thread needs larger stack)
|
||||
#ifdef AMD64
|
||||
size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
|
||||
#else
|
||||
size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
|
||||
#endif // AMD64
|
||||
return s;
|
||||
}
|
||||
|
||||
@ -803,7 +607,6 @@ void os::print_context(outputStream *st, const void *context) {
  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print( "RAX=" INTPTR_FORMAT, (intptr_t)uc->context_rax);
  st->print(", RBX=" INTPTR_FORMAT, (intptr_t)uc->context_rbx);
  st->print(", RCX=" INTPTR_FORMAT, (intptr_t)uc->context_rcx);
@ -829,26 +632,12 @@ void os::print_context(outputStream *st, const void *context) {
  st->print(", ERR=" INTPTR_FORMAT, (intptr_t)uc->context_err);
  st->cr();
  st->print(" TRAPNO=" INTPTR_FORMAT, (intptr_t)uc->context_trapno);
#else
  st->print( "EAX=" INTPTR_FORMAT, (intptr_t)uc->context_eax);
  st->print(", EBX=" INTPTR_FORMAT, (intptr_t)uc->context_ebx);
  st->print(", ECX=" INTPTR_FORMAT, (intptr_t)uc->context_ecx);
  st->print(", EDX=" INTPTR_FORMAT, (intptr_t)uc->context_edx);
  st->cr();
  st->print( "ESP=" INTPTR_FORMAT, (intptr_t)uc->context_esp);
  st->print(", EBP=" INTPTR_FORMAT, (intptr_t)uc->context_ebp);
  st->print(", ESI=" INTPTR_FORMAT, (intptr_t)uc->context_esi);
  st->print(", EDI=" INTPTR_FORMAT, (intptr_t)uc->context_edi);
  st->cr();
  st->print( "EIP=" INTPTR_FORMAT, (intptr_t)uc->context_eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, (intptr_t)uc->context_eflags);
#endif // AMD64
  st->cr();
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context, int& continuation) {
  const int register_count = AMD64_ONLY(16) NOT_AMD64(8);
  const int register_count = 16;
  int n = continuation;
  assert(n >= 0 && n <= register_count, "Invalid continuation value");
  if (context == nullptr || n == register_count) {
@ -861,7 +650,6 @@ void os::print_register_info(outputStream *st, const void *context, int& continu
    continuation = n + 1;
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->context_##id);
    switch (n) {
#ifdef AMD64
    CASE_PRINT_REG( 0, "RAX=", rax); break;
    CASE_PRINT_REG( 1, "RBX=", rbx); break;
    CASE_PRINT_REG( 2, "RCX=", rcx); break;
@ -878,28 +666,13 @@ void os::print_register_info(outputStream *st, const void *context, int& continu
    CASE_PRINT_REG(13, "R13=", r13); break;
    CASE_PRINT_REG(14, "R14=", r14); break;
    CASE_PRINT_REG(15, "R15=", r15); break;
#else
    CASE_PRINT_REG(0, "EAX=", eax); break;
    CASE_PRINT_REG(1, "EBX=", ebx); break;
    CASE_PRINT_REG(2, "ECX=", ecx); break;
    CASE_PRINT_REG(3, "EDX=", edx); break;
    CASE_PRINT_REG(4, "ESP=", esp); break;
    CASE_PRINT_REG(5, "EBP=", ebp); break;
    CASE_PRINT_REG(6, "ESI=", esi); break;
    CASE_PRINT_REG(7, "EDI=", edi); break;
#endif // AMD64
    }
  }
# undef CASE_PRINT_REG
    ++n;
  }
}

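CASE_PRINT_REG is plain token pasting: the `id` argument is glued onto `context_` to pick the saved-register field. For example,

CASE_PRINT_REG( 0, "RAX=", rax); break;

expands to

case 0: st->print("RAX="); print_location(st, uc->context_rax); break;

and the continuation index lets the error reporter resume the dump at the next register if printing one of them crashes.
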
void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile ( "fldcw (%0)" :
                     : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}

#ifndef PRODUCT

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -39,18 +39,11 @@ inline size_t os::cds_core_region_alignment() {

// See http://www.technovelty.org/code/c/reading-rdtsc.htl for details
inline jlong os::rdtsc() {
#ifndef AMD64
  // 64 bit result in edx:eax
  uint64_t res;
  __asm__ __volatile__ ("rdtsc" : "=A" (res));
  return (jlong)res;
#else
  uint64_t res;
  uint32_t ts1, ts2;
  __asm__ __volatile__ ("rdtsc" : "=a" (ts1), "=d" (ts2));
  res = ((uint64_t)ts1 | (uint64_t)ts2 << 32);
  return (jlong)res;
#endif // AMD64
}

#endif // OS_CPU_BSD_X86_OS_BSD_X86_INLINE_HPP

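The kept branch splits RDTSC's result because the "=A" constraint only names the EDX:EAX pair in 32-bit mode; on x86-64 it degenerates to a single register, so the low and high halves must be read separately and stitched together. A self-contained illustration of the same technique (plain C++, not HotSpot code):

#include <cstdint>

// RDTSC returns the low 32 bits of the time-stamp counter in EAX and
// the high 32 bits in EDX, in both 32- and 64-bit mode.
static inline uint64_t read_tsc() {
  uint32_t lo, hi;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
}
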
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,19 +29,13 @@


inline void Prefetch::read (const void *loc, intx interval) {
#ifdef AMD64
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
#endif // AMD64
}

inline void Prefetch::write(void *loc, intx interval) {
#ifdef AMD64

  // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
  //  __asm__ ("prefetchw (%0,%1,1)" : : "r" (loc), "r" (interval));
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));

#endif // AMD64
}

#endif // OS_CPU_BSD_X86_PREFETCH_BSD_X86_INLINE_HPP

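prefetcht0 is only a hint and never faults, which is why these helpers take a raw base pointer plus a byte offset with no validity checks; overshooting the end of an array is harmless. A hedged usage sketch (hypothetical caller and names, not from this patch):

// Warm the cache some elements ahead while scanning an array.
static void consume(int v); // stand-in for real work

static void scan(const int* data, int n) {
  for (int i = 0; i < n; i++) {
    Prefetch::read(data, (i + 16) * (intx)sizeof(int)); // hint: fetch ~a cache line ahead
    consume(data[i]);
  }
}
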
@ -93,8 +93,6 @@ inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
  return exchange_value;
}

#ifdef AMD64

template<>
template<typename D, typename I>
inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_value,
@ -135,51 +133,6 @@ inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in linux_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

template<>
template<typename T>
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                      T compare_value,
                                                      T exchange_value,
                                                      atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, dest, compare_value, exchange_value);
}

// No direct support for 8-byte xchg; emulate using cmpxchg.
template<>
struct AtomicAccess::PlatformXchg<8> : AtomicAccess::XchgUsingCmpxchg<8> {};

// No direct support for 8-byte add; emulate using cmpxchg.
template<>
struct AtomicAccess::PlatformAdd<8> : AtomicAccess::AddUsingCmpxchg<8> {};

template<>
template<typename T>
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
                                                       T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // AMD64

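Everything in the removed 32-bit block hangs off one primitive: given an 8-byte cmpxchg (here the assembly helper `_Atomic_cmpxchg_long`), xchg and add can be emulated with a compare-and-swap retry loop, which is what the `XchgUsingCmpxchg`/`AddUsingCmpxchg` bases supply. The generic shape of that emulation, sketched around a hypothetical free function `cmpxchg64` rather than HotSpot's real templates:

#include <cstdint>

// Assumed primitive: if *dest == cmp, atomically store nv; always return the prior value.
int64_t cmpxchg64(volatile int64_t* dest, int64_t cmp, int64_t nv);

// Atomic exchange built from compare-and-swap.
int64_t xchg64_via_cmpxchg(volatile int64_t* dest, int64_t nv) {
  int64_t old = *dest;
  for (;;) {
    int64_t prev = cmpxchg64(dest, old, nv);
    if (prev == old) return prev; // our swap landed
    old = prev;                   // lost the race; retry against the fresh value
  }
}
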
template<>
struct AtomicAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
@ -216,7 +169,6 @@ struct AtomicAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
  }
};

#ifdef AMD64
template<>
struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
@ -228,6 +180,5 @@ struct AtomicAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_LINUX_X86_ATOMICACCESS_LINUX_X86_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -28,24 +28,9 @@
// Sets the default values for platform dependent flags used by the runtime system.
// (see globals.hpp)

#ifdef AMD64
define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize,         1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize,       1024);
#else
// Some tests in debug VM mode run out of compile thread stack.
// Observed on some x86_32 VarHandles tests during escape analysis.
#ifdef ASSERT
define_pd_global(intx, CompilerThreadStackSize, 768);
#else
define_pd_global(intx, CompilerThreadStackSize, 512);
#endif
// ThreadStackSize 320 allows a couple of test cases to run while
// keeping the number of threads that can be created high. System
// default ThreadStackSize appears to be 512 which is too big.
define_pd_global(intx, ThreadStackSize,         320);
define_pd_global(intx, VMThreadStackSize,       512);
#endif // AMD64

define_pd_global(size_t, JVMInvokeMethodSlack,  8192);

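Once the #else arm is deleted, presumably only the uniform 1024K defaults survive; a sketch of the remaining block:

define_pd_global(intx, CompilerThreadStackSize, 1024);
define_pd_global(intx, ThreadStackSize,         1024); // 0 => use system default
define_pd_global(intx, VMThreadStackSize,       1024);
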
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -46,12 +46,8 @@ inline void OrderAccess::acquire() { compiler_barrier(); }
inline void OrderAccess::release() { compiler_barrier(); }

inline void OrderAccess::fence() {
  // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
  // always use locked addl since mfence is sometimes expensive
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  compiler_barrier();
}

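The locked-add idiom is deliberate: any LOCK-prefixed read-modify-write is a full barrier on x86, and adding zero to the word at the stack pointer touches a cache line that is almost certainly already present and exclusive, so it is often cheaper than mfence. With the %esp variant removed, the fence presumably reduces to (sketch, not the verified post-patch text):

inline void OrderAccess::fence() {
  // always use locked addl since mfence is sometimes expensive
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
  compiler_barrier();
}
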
@ -60,13 +56,7 @@ inline void OrderAccess::cross_modify_fence_impl() {
    __asm__ volatile (".byte 0x0f, 0x01, 0xe8\n\t" : : :); //serialize
  } else {
    int idx = 0;
#ifdef AMD64
    __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
#else
    // On some x86 systems EBX is a reserved register that cannot be
    // clobbered, so we must protect it around the CPUID.
    __asm__ volatile ("xchg %%esi, %%ebx; cpuid; xchg %%esi, %%ebx " : "+a" (idx) : : "esi", "ecx", "edx", "memory");
#endif
  }
}


@ -72,24 +72,13 @@
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
#ifndef AMD64
# include <fpu_control.h>
#endif

#ifdef AMD64
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define REG_BCP REG_R13
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
#define REG_SP REG_UESP
#define REG_PC REG_EIP
#define REG_FP REG_EBP
#define SPELL_REG_SP "esp"
#define SPELL_REG_FP "ebp"
#endif // AMD64

address os::current_stack_pointer() {
  return (address)__builtin_frame_address(0);
@ -281,43 +270,14 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
        }
        stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
      }
    } else
#ifdef AMD64
    if (sig == SIGFPE &&
        (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
    } else if (sig == SIGFPE &&
               (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
      stub =
        SharedRuntime::
        continuation_for_implicit_exception(thread,
                                            pc,
                                            SharedRuntime::
                                            IMPLICIT_DIVIDE_BY_ZERO);
#else
    if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
      // HACK: si_code does not work on linux 2.2.12-20!!!
      int op = pc[0];
      if (op == 0xDB) {
        // FIST
        // TODO: The encoding of D2I in x86_32.ad can cause an exception
        // prior to the fist instruction if there was an invalid operation
        // pending. We want to dismiss that exception. From the win_32
        // side it also seems that if it really was the fist causing
        // the exception that we do the d2i by hand with different
        // rounding. Seems kind of weird.
        // NOTE: that we take the exception at the NEXT floating point instruction.
        assert(pc[0] == 0xDB, "not a FIST opcode");
        assert(pc[1] == 0x14, "not a FIST opcode");
        assert(pc[2] == 0x24, "not a FIST opcode");
        return true;
      } else if (op == 0xF7) {
        // IDIV
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      } else {
        // TODO: handle more cases if we are using other x86 instructions
        // that can generate SIGFPE signal on linux.
        tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
        fatal("please update this code.");
      }
#endif // AMD64
    } else if (sig == SIGSEGV &&
               MacroAssembler::uses_implicit_null_check(info->si_addr)) {
      // Determination of interpreter/vtable stub/compiled code null exception
@ -344,81 +304,6 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
      }
    }

#ifndef AMD64
    // Execution protection violation
    //
    // This should be kept as the last step in the triage. We don't
    // have a dedicated trap number for a no-execute fault, so be
    // conservative and allow other handlers the first shot.
    //
    // Note: We don't test that info->si_code == SEGV_ACCERR here.
    // this si_code is so generic that it is almost meaningless; and
    // the si_code for this condition may change in the future.
    // Furthermore, a false-positive should be harmless.
    if (UnguardOnExecutionViolation > 0 &&
        stub == nullptr &&
        (sig == SIGSEGV || sig == SIGBUS) &&
        uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
      size_t page_size = os::vm_page_size();
      address addr = (address) info->si_addr;
      address pc = os::Posix::ucontext_get_pc(uc);
      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s, errno=%d", p2i(addr),
                        p2i(page_start), (res ? "success" : "failed"), errno);
          stub = pc;

          // Set last_addr so if we fault again at the same address, we don't end
          // up in an endless loop.
          //
          // There are two potential complications here. Two threads trapping at
          // the same address at the same time could cause one of the threads to
          // think it already unguarded, and abort the VM. Likely very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop. This condition is probably even more unlikely than
          // the first.
          //
          // Although both cases could be avoided by using locks or thread local
          // last_addr, these solutions are unnecessary complication: this
          // handler is a best-effort safety net, not a complete solution. It is
          // disabled by default and should only be used as a workaround in case
          // we missed any no-execute-unsafe VM code.

          last_addr = addr;
        }
      }
    }
#endif // !AMD64

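The `instr_spans_page_boundary` test above packs the page comparison into one expression: XOR leaves a set bit wherever pc and addr differ, and aligning that down to the page size is nonzero exactly when they differ above the in-page bits, i.e. when they sit on different pages. Restated as a stand-alone helper (hypothetical name; page_size assumed a power of two):

#include <cstdint>

// True iff a and b fall on different pages of size page_size (a power of two).
static bool on_different_pages(uintptr_t a, uintptr_t b, uintptr_t page_size) {
  return ((a ^ b) & ~(page_size - 1)) != 0;
}
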
  if (stub != nullptr) {
    // save all thread context in case we need to restore it
    if (thread != nullptr) thread->set_saved_exception_pc(pc);
@ -431,26 +316,13 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
}

void os::Linux::init_thread_fpu_state(void) {
#ifndef AMD64
  // set fpu to 53 bit precision
  set_fpu_control_word(0x27f);
#endif // !AMD64
}

int os::Linux::get_fpu_control_word(void) {
#ifdef AMD64
  return 0;
#else
  int fpu_control;
  _FPU_GETCW(fpu_control);
  return fpu_control & 0xffff;
#endif // AMD64
}

void os::Linux::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
  _FPU_SETCW(fpu_control);
#endif // !AMD64
}

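The magic 0x27f written by the removed path is the x87 control word with precision control set to double (53-bit mantissa) and all exceptions masked, matching Java double rounding on x87. The same value spelled with the glibc macros from <fpu_control.h>, the header the removed include pulled in (illustrative, not from the patch; the constants are real glibc names):

#include <fpu_control.h>

// 0x37f (_FPU_DEFAULT) with the precision-control field switched
// from extended (64-bit) to double (53-bit) gives 0x27f.
static void set_53bit_precision() {
  fpu_control_t cw;
  _FPU_GETCW(cw);
  cw = (fpu_control_t)((cw & ~_FPU_EXTENDED) | _FPU_DOUBLE);
  _FPU_SETCW(cw);
}
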
juint os::cpu_microcode_revision() {
@ -496,20 +368,12 @@ juint os::cpu_microcode_revision() {
// HotSpot guard pages is added later.
size_t os::_compiler_thread_min_stack_allowed = 48 * K;
size_t os::_java_thread_min_stack_allowed = 40 * K;
#ifdef _LP64
size_t os::_vm_internal_thread_min_stack_allowed = 64 * K;
#else
size_t os::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K;
#endif // _LP64

// return default stack size for thr_type
size_t os::Posix::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
#ifdef AMD64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // AMD64
  return s;
}

@ -522,7 +386,6 @@ void os::print_context(outputStream *st, const void *context) {
  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print( "RAX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RCX]);
@ -564,27 +427,12 @@ void os::print_context(outputStream *st, const void *context) {
    }
    st->print(" MXCSR=" UINT32_FORMAT_X_0, uc->uc_mcontext.fpregs->mxcsr);
  }
#else
  st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
  st->cr();
  st->print( "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
  st->cr();
  st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CR2=" UINT64_FORMAT_X_0, (uint64_t)uc->uc_mcontext.cr2);
#endif // AMD64
  st->cr();
  st->cr();
}

void os::print_register_info(outputStream *st, const void *context, int& continuation) {
  const int register_count = AMD64_ONLY(16) NOT_AMD64(8);
  const int register_count = 16;
  int n = continuation;
  assert(n >= 0 && n <= register_count, "Invalid continuation value");
  if (context == nullptr || n == register_count) {
@ -597,7 +445,6 @@ void os::print_register_info(outputStream *st, const void *context, int& continu
    continuation = n + 1;
# define CASE_PRINT_REG(n, str, id) case n: st->print(str); print_location(st, uc->uc_mcontext.gregs[REG_##id]);
    switch (n) {
#ifdef AMD64
    CASE_PRINT_REG( 0, "RAX=", RAX); break;
    CASE_PRINT_REG( 1, "RBX=", RBX); break;
    CASE_PRINT_REG( 2, "RCX=", RCX); break;
@ -614,16 +461,6 @@ void os::print_register_info(outputStream *st, const void *context, int& continu
    CASE_PRINT_REG(13, "R13=", R13); break;
    CASE_PRINT_REG(14, "R14=", R14); break;
    CASE_PRINT_REG(15, "R15=", R15); break;
#else
    CASE_PRINT_REG(0, "EAX=", EAX); break;
    CASE_PRINT_REG(1, "EBX=", EBX); break;
    CASE_PRINT_REG(2, "ECX=", ECX); break;
    CASE_PRINT_REG(3, "EDX=", EDX); break;
    CASE_PRINT_REG(4, "ESP=", ESP); break;
    CASE_PRINT_REG(5, "EBP=", EBP); break;
    CASE_PRINT_REG(6, "ESI=", ESI); break;
    CASE_PRINT_REG(7, "EDI=", EDI); break;
#endif // AMD64
    }
# undef CASE_PRINT_REG
    ++n;
@ -631,18 +468,11 @@ void os::print_register_info(outputStream *st, const void *context, int& continu
}

void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::x86::addr_fpu_cntrl_wrd_std();
  __asm__ volatile ( "fldcw (%0)" :
                     : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif


@ -1,5 +1,5 @@
/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,18 +29,11 @@

// See http://www.technovelty.org/code/c/reading-rdtsc.htl for details
inline jlong os::rdtsc() {
#ifndef AMD64
  // 64 bit result in edx:eax
  uint64_t res;
  __asm__ __volatile__ ("rdtsc" : "=A" (res));
  return (jlong)res;
#else
  uint64_t res;
  uint32_t ts1, ts2;
  __asm__ __volatile__ ("rdtsc" : "=a" (ts1), "=d" (ts2));
  res = ((uint64_t)ts1 | (uint64_t)ts2 << 32);
  return (jlong)res;
#endif // AMD64
}

#endif // OS_CPU_LINUX_X86_OS_LINUX_X86_INLINE_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -29,19 +29,13 @@


inline void Prefetch::read (const void *loc, intx interval) {
#ifdef AMD64
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
#endif // AMD64
}

inline void Prefetch::write(void *loc, intx interval) {
#ifdef AMD64

  // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
  //  __asm__ ("prefetchw (%0,%1,1)" : : "r" (loc), "r" (interval));
  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));

#endif // AMD64
}

#endif // OS_CPU_LINUX_X86_PREFETCH_LINUX_X86_INLINE_HPP

@ -162,7 +162,6 @@ bool os::win32::register_code_area(char *low, char *high) {
  return true;
}

#if defined(_M_AMD64)
//-----------------------------------------------------------------------------
bool handle_FLT_exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
@ -197,7 +196,6 @@ bool handle_FLT_exception(struct _EXCEPTION_POINTERS* exceptionInfo) {

  return false;
}
#endif

address os::fetch_frame_from_context(const void* ucVoid,
                                     intptr_t** ret_sp, intptr_t** ret_fp) {
