Merge branch 'master' into 8288966-plab-boost

Thomas Schatzl 2022-08-08 15:10:04 +02:00
commit ce2701cbc5
309 changed files with 4782 additions and 2177 deletions

View File

@ -0,0 +1 @@
0x25 U+0025

View File

@ -81,6 +81,7 @@ JVM_GetClassDeclaredConstructors
JVM_GetClassDeclaredFields
JVM_GetClassDeclaredMethods
JVM_GetClassFieldsCount
JVM_GetClassFileVersion
JVM_GetClassInterfaces
JVM_GetClassMethodsCount
JVM_GetClassModifiers

View File

@ -172,8 +172,10 @@ ifeq ($(call isTargetOs, macosx), true)
endif
ifeq ($(call isTargetOs, windows), true)
# Supply the name of the C runtime lib.
LIBJLI_CFLAGS += -DMSVCR_DLL_NAME='"$(notdir $(MSVCR_DLL))"'
# Supply the name of the C runtime libs.
ifneq ($(MSVCR_DLL), )
LIBJLI_CFLAGS += -DMSVCR_DLL_NAME='"$(notdir $(MSVCR_DLL))"'
endif
ifneq ($(VCRUNTIME_1_DLL), )
LIBJLI_CFLAGS += -DVCRUNTIME_1_DLL_NAME='"$(notdir $(VCRUNTIME_1_DLL))"'
endif

View File

@ -336,19 +336,13 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
// Compiled frames
inline oop frame::saved_oop_result(RegisterMap* map) const {
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
PRAGMA_DIAG_POP
guarantee(result_adr != NULL, "bad register save location");
return *result_adr;
}
inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
oop* result_adr = (oop *)map->location(r0->as_VMReg(), sp());
PRAGMA_DIAG_POP
guarantee(result_adr != NULL, "bad register save location");
*result_adr = obj;

View File

@ -28,13 +28,11 @@
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include OS_HEADER_INLINE(os)
int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_model2;

View File

@ -196,6 +196,10 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
// Compiled frames
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for rax. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop*) map->location(R0->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location");
@ -207,6 +211,7 @@ inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
guarantee(result_adr != NULL, "bad register save location");
*result_adr = obj;
}
PRAGMA_DIAG_POP
inline int frame::frame_size() const {
return sender_sp() - sp();

View File

@ -38,6 +38,7 @@
#include "oops/compressedOops.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

View File

@ -34,6 +34,7 @@
#include "gc/shared/barrierSetAssembler.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"

View File

@ -38,6 +38,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"

View File

@ -41,6 +41,7 @@
#include <sys/sysinfo.h>
#if defined(_AIX)
#include "os_aix.hpp"
#include <libperfstat.h>
#endif

View File

@ -311,13 +311,6 @@ void Assembler::movptr(Register Rd, address addr) {
addi(Rd, Rd, offset);
}
void Assembler::ifence() {
fence_i();
if (UseConservativeFence) {
fence(ir, ir);
}
}
#define INSN(NAME, NEG_INSN) \
void Assembler::NAME(Register Rs, Register Rt, const address &dest) { \
NEG_INSN(Rt, Rs, dest); \

View File

@ -339,7 +339,6 @@ public:
void movptr(Register Rd, address addr);
void movptr_with_offset(Register Rd, address addr, int32_t &offset);
void movptr(Register Rd, uintptr_t imm64);
void ifence();
void j(const address &dest, Register temp = t0);
void j(const Address &adr, Register temp = t0);
void j(Label &l, Register temp = t0);
@ -961,7 +960,6 @@ public:
emit(insn); \
}
INSN(fence_i, 0b0001111, 0b001, 0b000000000000);
INSN(ecall, 0b1110011, 0b000, 0b000000000000);
INSN(_ebreak, 0b1110011, 0b000, 0b000000000001);

View File

@ -69,8 +69,8 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
#undef __
int CompiledStaticCall::to_interp_stub_size() {
// fence_i + fence* + (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr
return NativeFenceI::instruction_size() + 12 * NativeInstruction::instruction_size;
// (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr
return 12 * NativeInstruction::instruction_size;
}
int CompiledStaticCall::to_trampoline_stub_size() {
@ -98,7 +98,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeFenceI::instruction_size());
= nativeMovConstReg_at(stub);
#ifdef ASSERT
NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
@ -119,7 +119,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeFenceI::instruction_size());
= nativeMovConstReg_at(stub);
method_holder->set_data(0);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
jump->set_jump_destination((address)-1);
@ -139,7 +139,7 @@ void CompiledDirectStaticCall::verify() {
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeFenceI::instruction_size());
= nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
// Verify state.

View File

@ -275,13 +275,11 @@ inline intptr_t* frame::interpreter_frame_expression_stack() const {
// Entry frames
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
return (JavaCallWrapper**)addr_at(entry_frame_call_wrapper_offset);
}
// Compiled frames
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(x10->as_VMReg(), nullptr);
guarantee(result_adr != NULL, "bad register save location");
@ -293,7 +291,6 @@ inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
guarantee(result_adr != NULL, "bad register save location");
*result_adr = obj;
}
PRAGMA_DIAG_POP
inline const ImmutableOopMap* frame::get_oop_map() const {
if (_cb == NULL) return NULL;

View File

@ -88,8 +88,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(bool, TraceTraps, false, "Trace all traps the signal handler") \
/* For now we're going to be safe and add the I/O bits to userspace fences. */ \
product(bool, UseConservativeFence, true, \
"Extend i for r and o for w in the pred/succ flags of fence;" \
"Extend fence.i to fence.i + fence.") \
"Extend i for r and o for w in the pred/succ flags of fence") \
product(bool, AvoidUnalignedAccesses, true, \
"Avoid generating unaligned memory accesses") \
product(bool, UseRVV, false, EXPERIMENTAL, "Use RVV instructions") \
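The UseConservativeFence description is terse; a minimal sketch of the mapping it names, assuming the standard RISC-V fence bit layout (the function and constant names below are illustrative, not HotSpot code):

    // "Extend i for r and o for w": whenever a fence's pred/succ set contains the
    // memory-read bit, also set device-input; whenever it contains memory-write,
    // also set device-output.
    static uint32_t conservative_fence_bits(uint32_t bits) {
      const uint32_t FENCE_I = 0x8, FENCE_O = 0x4, FENCE_R = 0x2, FENCE_W = 0x1;
      if (bits & FENCE_R) { bits |= FENCE_I; }
      if (bits & FENCE_W) { bits |= FENCE_O; }
      return bits;
    }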

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -30,7 +30,15 @@
#define __ _masm->
static int icache_flush(address addr, int lines, int magic) {
os::icache_flush((long int) addr, (long int) (addr + (lines << ICache::log2_line_size)));
// To make a store to instruction memory visible to all RISC-V harts,
// the writing hart has to execute a data FENCE before requesting that
// all remote RISC-V harts execute a FENCE.I.
//
// No such assurance is defined at the interface level of the builtin
// method, and so we should make sure it works.
__asm__ volatile("fence rw, rw" : : : "memory");
__builtin___clear_cache(addr, addr + (lines << ICache::log2_line_size));
return magic;
}
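For orientation, a hedged sketch of how a flush routine with this (address, lines, magic) shape is typically invoked; the caller below, its constants, and the assert are illustrative rather than code from this change:

    static int flush_range_sketch(address start, int nbytes) {
      // Round the byte count up to whole cache lines, as ICache expects.
      int lines = (nbytes + ICache::line_size - 1) >> ICache::log2_line_size;
      const int magic = 0xc001;               // echoed back so the caller can sanity-check
      int result = icache_flush(start, lines, magic);
      assert(result == magic, "flush routine must hand its magic argument back");
      return result;
    }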

View File

@ -555,7 +555,6 @@ void MacroAssembler::emit_static_call_stub() {
// CompiledDirectStaticCall::set_to_interpreted knows the
// exact layout of this stub.
ifence();
mov_metadata(xmethod, (Metadata*)NULL);
// Jump to the entry point of the i2c stub.
@ -1668,7 +1667,6 @@ void MacroAssembler::movoop(Register dst, jobject obj, bool immediate) {
// nmethod entry barriers necessitate using the constant pool. They have to be
// ordered with respect to oop accesses.
// Using immediate literals would necessitate fence.i.
if (BarrierSet::barrier_set()->barrier_set_nmethod() != NULL || !immediate) {
address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address
ld_constant(dst, Address(dummy, rspec));
@ -2746,7 +2744,6 @@ void MacroAssembler::build_frame(int framesize) {
sd(fp, Address(sp, framesize - 2 * wordSize));
sd(ra, Address(sp, framesize - wordSize));
if (PreserveFramePointer) { add(fp, sp, framesize); }
verify_cross_modify_fence_not_required();
}
void MacroAssembler::remove_frame(int framesize) {
@ -2797,7 +2794,6 @@ address MacroAssembler::read_polling_page(Register r, int32_t offset, relocInfo:
lwu(zr, Address(r, offset));
mark = inst_mark();
}
verify_cross_modify_fence_not_required();
return mark;
}
@ -3981,29 +3977,3 @@ void MacroAssembler::cmp_l2i(Register dst, Register src1, Register src2, Registe
neg(dst, dst);
bind(done);
}
void MacroAssembler::safepoint_ifence() {
ifence();
#ifndef PRODUCT
if (VerifyCrossModifyFence) {
// Clear the thread state.
sb(zr, Address(xthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
}
#endif
}
#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
if (VerifyCrossModifyFence) {
// Check if thread needs a cross modify fence.
lbu(t0, Address(xthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
Label fence_not_required;
beqz(t0, fence_not_required);
// If it does then fail.
la(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
mv(c_rarg0, xthread);
jalr(t0);
bind(fence_not_required);
}
}
#endif

View File

@ -46,9 +46,6 @@ class MacroAssembler: public Assembler {
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
// Place a fence.i after code may have been modified due to a safepoint.
void safepoint_ifence();
// Alignment
void align(int modulus, int extra_offset = 0);
@ -836,9 +833,6 @@ private:
void load_reserved(Register addr, enum operand_size size, Assembler::Aqrl acquire);
void store_conditional(Register addr, Register new_val, enum operand_size size, Assembler::Aqrl release);
// Check the current thread doesn't need a cross modify fence.
void verify_cross_modify_fence_not_required() PRODUCT_RETURN;
};
#ifdef ASSERT

View File

@ -42,7 +42,6 @@
// - - NativeIllegalInstruction
// - - NativeCallTrampolineStub
// - - NativeMembar
// - - NativeFenceI
// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.
@ -554,14 +553,6 @@ inline NativeMembar *NativeMembar_at(address addr) {
return (NativeMembar*)addr;
}
class NativeFenceI : public NativeInstruction {
public:
static inline int instruction_size() {
// 2 for fence.i + fence
return (UseConservativeFence ? 2 : 1) * NativeInstruction::instruction_size;
}
};
class NativePostCallNop: public NativeInstruction {
public:
bool check() const { return is_nop(); }

View File

@ -354,10 +354,6 @@ static void patch_callers_callsite(MacroAssembler *masm) {
__ la_patchable(t0, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)), offset);
__ jalr(x1, t0, offset);
// Explicit fence.i required because fixup_callers_callsite may change the code
// stream.
__ safepoint_ifence();
__ pop_CPU_state();
// restore sp
__ leave();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -25,13 +25,11 @@
#include "precompiled.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include OS_HEADER_INLINE(os)
const char* VM_Version::_uarch = "";
uint32_t VM_Version::_initial_vector_length = 0;

View File

@ -896,6 +896,8 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
tail_size = 1;
break;
case 0x10: // movups
case 0x11: // movups
case 0x12: // movlps
case 0x28: // movaps
case 0x2E: // ucomiss
@ -2561,10 +2563,22 @@ void Assembler::movddup(XMMRegister dst, XMMRegister src) {
emit_int16(0x12, 0xC0 | encode);
}
void Assembler::movddup(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse3(), ""));
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x12);
emit_operand(dst, src);
}
void Assembler::vmovddup(XMMRegister dst, Address src, int vector_len) {
assert(VM_Version::supports_avx(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_DUP, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x12);
@ -3505,6 +3519,46 @@ void Assembler::movswl(Register dst, Register src) { // movsxw
emit_int24(0x0F, (unsigned char)0xBF, (0xC0 | encode));
}
void Assembler::movups(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8(0x10);
emit_operand(dst, src);
}
void Assembler::vmovups(XMMRegister dst, Address src, int vector_len) {
assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8(0x10);
emit_operand(dst, src);
}
void Assembler::movups(Address dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8(0x11);
emit_operand(src, dst);
}
void Assembler::vmovups(Address dst, XMMRegister src, int vector_len) {
assert(vector_len == AVX_512bit ? VM_Version::supports_evex() : VM_Version::supports_avx(), "");
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_32bit);
simd_prefix(src, xnoreg, dst, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8(0x11);
emit_operand(src, dst);
}
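A minimal usage sketch for the unaligned load/store forms added above, assuming a MacroAssembler context in which rsi and rdi hold source and destination buffers (the buffers and register choices are illustrative):

    __ vmovups(xmm0, Address(rsi, 0), Assembler::AVX_256bit); // 32-byte unaligned load into ymm0
    __ vmovups(Address(rdi, 0), xmm0, Assembler::AVX_256bit); // 32-byte unaligned store back out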
void Assembler::movw(Address dst, int imm16) {
InstructionMark im(this);
@ -5156,7 +5210,7 @@ void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, i
emit_int24(0x43, (0xC0 | encode), imm8 & 0xFF);
}
void Assembler::pshufpd(XMMRegister dst, XMMRegister src, int imm8) {
void Assembler::shufpd(XMMRegister dst, XMMRegister src, int imm8) {
assert(isByte(imm8), "invalid value");
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
@ -5164,14 +5218,14 @@ void Assembler::pshufpd(XMMRegister dst, XMMRegister src, int imm8) {
emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}
void Assembler::vpshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
void Assembler::vshufpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
attributes.set_rex_vex_w_reverted();
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}
void Assembler::pshufps(XMMRegister dst, XMMRegister src, int imm8) {
void Assembler::shufps(XMMRegister dst, XMMRegister src, int imm8) {
assert(isByte(imm8), "invalid value");
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
@ -5179,7 +5233,7 @@ void Assembler::pshufps(XMMRegister dst, XMMRegister src, int imm8) {
emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
}
void Assembler::vpshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
void Assembler::vshufps(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int24((unsigned char)0xC6, (0xC0 | encode), imm8 & 0xFF);
@ -7993,10 +8047,6 @@ void Assembler::evprolq(XMMRegister dst, XMMRegister src, int shift, int vector_
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for xmm0. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
void Assembler::evprord(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
assert(VM_Version::supports_evex(), "requires EVEX support");
assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
@ -8014,7 +8064,6 @@ void Assembler::evprorq(XMMRegister dst, XMMRegister src, int shift, int vector_
int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
PRAGMA_DIAG_POP
void Assembler::evprolvd(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
assert(VM_Version::supports_evex(), "requires EVEX support");
@ -11566,10 +11615,6 @@ void Assembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, Address
emit_int8((unsigned char)comparison);
}
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for xmm0. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
void Assembler::evprord(XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vector_len) {
assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
@ -11593,7 +11638,6 @@ void Assembler::evprorq(XMMRegister dst, KRegister mask, XMMRegister src, int sh
int encode = vex_prefix_and_encode(xmm0->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int24(0x72, (0xC0 | encode), shift & 0xFF);
}
PRAGMA_DIAG_POP
void Assembler::evprorvd(XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) {
assert(vector_len == AVX_512bit || VM_Version::supports_avx512vl(), "");

View File

@ -1492,6 +1492,7 @@ private:
void movb(Register dst, Address src);
void movddup(XMMRegister dst, XMMRegister src);
void movddup(XMMRegister dst, Address src);
void vmovddup(XMMRegister dst, Address src, int vector_len);
void kandbl(KRegister dst, KRegister src1, KRegister src2);
@ -1663,6 +1664,11 @@ private:
void movswq(Register dst, Register src);
#endif
void movups(XMMRegister dst, Address src);
void vmovups(XMMRegister dst, Address src, int vector_len);
void movups(Address dst, XMMRegister src);
void vmovups(Address dst, XMMRegister src, int vector_len);
void movw(Address dst, int imm16);
void movw(Register dst, Address src);
void movw(Address dst, Register src);
@ -1942,10 +1948,10 @@ private:
void pshuflw(XMMRegister dst, Address src, int mode);
//shuffle floats and doubles
void pshufps(XMMRegister, XMMRegister, int);
void pshufpd(XMMRegister, XMMRegister, int);
void vpshufps(XMMRegister, XMMRegister, XMMRegister, int, int);
void vpshufpd(XMMRegister, XMMRegister, XMMRegister, int, int);
void shufps(XMMRegister, XMMRegister, int);
void shufpd(XMMRegister, XMMRegister, int);
void vshufps(XMMRegister, XMMRegister, XMMRegister, int, int);
void vshufpd(XMMRegister, XMMRegister, XMMRegister, int, int);
// Shuffle packed values at 128 bit granularity
void evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len);

View File

@ -320,10 +320,6 @@ enum reg_save_layout {
// describe FPU registers. In all other cases it should be sufficient
// to simply save their current value.
//
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for rax. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
bool save_fpu_registers = true) {
@ -418,7 +414,6 @@ static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
return map;
}
PRAGMA_DIAG_POP
#define __ this->

View File

@ -1643,12 +1643,12 @@ void C2_MacroAssembler::load_vector_mask(KRegister dst, XMMRegister src, XMMRegi
void C2_MacroAssembler::load_vector(XMMRegister dst, Address src, int vlen_in_bytes) {
switch (vlen_in_bytes) {
case 4: movdl(dst, src); break;
case 8: movq(dst, src); break;
case 16: movdqu(dst, src); break;
case 32: vmovdqu(dst, src); break;
case 64: evmovdquq(dst, src, Assembler::AVX_512bit); break;
default: ShouldNotReachHere();
case 4: movdl(dst, src); break;
case 8: movq(dst, src); break;
case 16: movdqu(dst, src); break;
case 32: vmovdqu(dst, src); break;
case 64: evmovdqul(dst, src, Assembler::AVX_512bit); break;
default: ShouldNotReachHere();
}
}
@ -1661,6 +1661,38 @@ void C2_MacroAssembler::load_vector(XMMRegister dst, AddressLiteral src, int vle
}
}
void C2_MacroAssembler::load_constant_vector(BasicType bt, XMMRegister dst, InternalAddress src, int vlen) {
int vlen_enc = vector_length_encoding(vlen);
if (VM_Version::supports_avx()) {
if (bt == T_LONG) {
if (VM_Version::supports_avx2()) {
vpbroadcastq(dst, src, vlen_enc, noreg);
} else {
vmovddup(dst, src, vlen_enc, noreg);
}
} else if (bt == T_DOUBLE) {
if (vlen_enc != Assembler::AVX_128bit) {
vbroadcastsd(dst, src, vlen_enc, noreg);
} else {
vmovddup(dst, src, vlen_enc, noreg);
}
} else {
if (VM_Version::supports_avx2() && is_integral_type(bt)) {
vpbroadcastd(dst, src, vlen_enc, noreg);
} else {
vbroadcastss(dst, src, vlen_enc, noreg);
}
}
} else if (VM_Version::supports_sse3()) {
movddup(dst, src);
} else {
movq(dst, src);
if (vlen == 16) {
punpcklqdq(dst, dst);
}
}
}
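A hedged usage sketch of the new helper as an ad-file encoding block might call it; addr stands in for the InternalAddress produced by $constantaddress and is illustrative only:

    // Broadcast one pool-resident int across a 32-byte vector register.
    __ load_constant_vector(T_INT, xmm1, addr, 32);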
void C2_MacroAssembler::load_iota_indices(XMMRegister dst, Register scratch, int vlen_in_bytes) {
ExternalAddress addr(StubRoutines::x86::vector_iota_indices());
if (vlen_in_bytes <= 4) {
@ -2317,9 +2349,9 @@ void C2_MacroAssembler::get_elem(BasicType typ, XMMRegister dst, XMMRegister src
if (typ == T_FLOAT) {
if (UseAVX == 0) {
movdqu(dst, src);
pshufps(dst, dst, eindex);
shufps(dst, dst, eindex);
} else {
vpshufps(dst, src, src, eindex, Assembler::AVX_128bit);
vshufps(dst, src, src, eindex, Assembler::AVX_128bit);
}
} else {
if (UseAVX == 0) {

View File

@ -159,6 +159,7 @@ public:
void load_vector(XMMRegister dst, Address src, int vlen_in_bytes);
void load_vector(XMMRegister dst, AddressLiteral src, int vlen_in_bytes, Register rscratch = rscratch1);
void load_constant_vector(BasicType bt, XMMRegister dst, InternalAddress src, int vlen);
void load_iota_indices(XMMRegister dst, Register scratch, int vlen_in_bytes);
// Reductions for vectors of bytes, shorts, ints, longs, floats, and doubles.

View File

@ -322,10 +322,6 @@ inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
// Compiled frames
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for rax. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
inline oop frame::saved_oop_result(RegisterMap* map) const {
oop* result_adr = (oop *)map->location(rax->as_VMReg(), sp());
guarantee(result_adr != NULL, "bad register save location");
@ -338,7 +334,6 @@ inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
*result_adr = obj;
}
PRAGMA_DIAG_POP
inline bool frame::is_interpreted_frame() const {
return Interpreter::contains(pc());

View File

@ -466,10 +466,6 @@ private:
_spill_offset += 8;
}
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for rax. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
void initialize(ZLoadBarrierStubC2* stub) {
// Create mask of caller saved registers that need to
// be saved/restored if live
@ -545,7 +541,6 @@ PRAGMA_NONNULL_IGNORED
// Stack pointer must be 16 bytes aligned for the call
_spill_offset = _spill_size = align_up(xmm_spill_size + gp_spill_size + opmask_spill_size + arg_spill_size, 16);
}
PRAGMA_DIAG_POP
public:
ZSaveLiveRegisters(MacroAssembler* masm, ZLoadBarrierStubC2* stub) :

View File

@ -1122,7 +1122,7 @@ void InterpreterMacroAssembler::remove_activation(
bind(loop);
// check if current entry is used
cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
jcc(Assembler::notEqual, exception);
addptr(rmon, entry_size); // otherwise advance to next entry
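The NULL to NULL_WORD substitutions here and in the later hunks keep these 32-bit immediates integral; a minimal sketch of the distinction, with NULL_WORD modeled as an integer zero of pointer width (an assumption made for illustration, and only relevant where NULL expands to a pointer constant such as nullptr):

    #include <cstdint>
    constexpr intptr_t NULL_WORD_SKETCH = 0;     // stand-in for HotSpot's NULL_WORD macro
    // int32_t bad = (int32_t) nullptr;          // ill-formed: nullptr is not an integral value
    int32_t good   = (int32_t) NULL_WORD_SKETCH; // plain integral zero, always well-formed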

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -348,10 +348,10 @@ class SlowSignatureHandler
intptr_t *from_addr = (intptr_t*)(_from + Interpreter::local_offset_in_bytes(0));
_from -= Interpreter::stackElementSize;
if (_num_args < Argument::n_int_register_parameters_c-1) {
*_reg_args++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
*_reg_args++ = (*from_addr == 0) ? NULL_WORD : (intptr_t) from_addr;
_num_args++;
} else {
*_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
*_to++ = (*from_addr == 0) ? NULL_WORD : (intptr_t) from_addr;
}
}
@ -443,10 +443,10 @@ class SlowSignatureHandler
_from -= Interpreter::stackElementSize;
if (_num_int_args < Argument::n_int_register_parameters_c-1) {
*_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr;
*_int_args++ = (*from_addr == 0) ? NULL_WORD : (intptr_t)from_addr;
_num_int_args++;
} else {
*_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr;
*_to++ = (*from_addr == 0) ? NULL_WORD : (intptr_t) from_addr;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2004, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"

View File

@ -2732,6 +2732,15 @@ void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
}
}
void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) {
if (reachable(src)) {
Assembler::movddup(dst, as_Address(src));
} else {
lea(rscratch, src);
Assembler::movddup(dst, Address(rscratch, 0));
}
}
void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
if (reachable(src)) {
Assembler::vmovddup(dst, as_Address(src), vector_len);
@ -3288,9 +3297,13 @@ void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src,
}
}
void MacroAssembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
Assembler::vpbroadcastw(dst, src, vector_len);
void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
if (reachable(src)) {
Assembler::vpbroadcastd(dst, as_Address(src), vector_len);
} else {
lea(rscratch, src);
Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len);
}
}
void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
@ -3311,6 +3324,15 @@ void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vecto
}
}
void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) {
if (reachable(src)) {
Assembler::vbroadcastss(dst, as_Address(src), vector_len);
} else {
lea(rscratch, src);
Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len);
}
}
void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15");
Assembler::vpcmpeqb(dst, nds, src, vector_len);
@ -4354,10 +4376,14 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
void MacroAssembler::vallones(XMMRegister dst, int vector_len) {
if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) {
// Only pcmpeq has dependency-breaking treatment (i.e. execution can begin without
// waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog
vpternlogd(dst, 0xFF, dst, dst, vector_len);
} else if (VM_Version::supports_avx()) {
vpcmpeqd(dst, dst, dst, vector_len);
} else {
assert(UseAVX > 0, "");
vpcmpeqb(dst, dst, dst, vector_len);
assert(VM_Version::supports_sse2(), "");
pcmpeqd(dst, dst);
}
}

View File

@ -1114,6 +1114,12 @@ public:
void addpd(XMMRegister dst, Address src) { Assembler::addpd(dst, src); }
void addpd(XMMRegister dst, AddressLiteral src);
using Assembler::vbroadcastsd;
void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
using Assembler::vbroadcastss;
void vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
void divsd(XMMRegister dst, AddressLiteral src);
@ -1150,6 +1156,11 @@ public:
void kmov(Register dst, KRegister src);
void kmov(KRegister dst, Register src);
using Assembler::movddup;
void movddup(XMMRegister dst, AddressLiteral src, Register rscratch = rscratch1);
using Assembler::vmovddup;
void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
// AVX Unaligned forms
void vmovdqu(Address dst, XMMRegister src);
void vmovdqu(XMMRegister dst, Address src);
@ -1157,7 +1168,6 @@ public:
void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg = rscratch1);
void vmovdqu(XMMRegister dst, AddressLiteral src, Register scratch_reg, int vector_len);
// AVX512 Unaligned
void evmovdqu(BasicType type, KRegister kmask, Address dst, XMMRegister src, bool merge, int vector_len);
void evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len);
@ -1229,9 +1239,6 @@ public:
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, AddressLiteral src);
using Assembler::vmovddup;
void vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
void mulpd(XMMRegister dst, XMMRegister src) { Assembler::mulpd(dst, src); }
void mulpd(XMMRegister dst, Address src) { Assembler::mulpd(dst, src); }
void mulpd(XMMRegister dst, AddressLiteral src);
@ -1337,16 +1344,11 @@ public:
void vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { Assembler::vpand(dst, nds, src, vector_len); }
void vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register scratch_reg = rscratch1);
void vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len);
void vpbroadcastw(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastw(dst, src, vector_len); }
using Assembler::vpbroadcastd;
void vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
using Assembler::vbroadcastsd;
void vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
using Assembler::vpbroadcastq;
void vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch = rscratch1);
void vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) { Assembler::vpbroadcastq(dst, src, vector_len); }
void vpbroadcastq(XMMRegister dst, Address src, int vector_len) { Assembler::vpbroadcastq(dst, src, vector_len); }
void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -26,12 +26,10 @@
#include "rdtsc_x86.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "vm_version_x86.hpp"
// The following header contains the implementations of rdtsc()
#include OS_CPU_HEADER_INLINE(os)
static jlong _epoch = 0;
static bool rdtsc_elapsed_counter_enabled = false;
static jlong tsc_frequency = 0;

View File

@ -171,10 +171,6 @@ class RegisterSaver {
static void restore_result_registers(MacroAssembler* masm);
};
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for rax. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_wide_vectors) {
int off = 0;
int num_xmm_regs = XMMRegisterImpl::available_xmm_registers();
@ -365,7 +361,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
return map;
}
PRAGMA_DIAG_POP
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_wide_vectors) {
int num_xmm_regs = XMMRegisterImpl::available_xmm_registers();

View File

@ -529,7 +529,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure this code is only executed if there is a pending exception
{
Label L;
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (1)");
__ bind(L);

View File

@ -164,10 +164,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr
__ block_comment("} restore_callee_saved_regs ");
}
// Register is a class, but it would be assigned numerical value.
// "0" is assigned for rax and for xmm0. Thus we need to ignore -Wnonnull.
PRAGMA_DIAG_PUSH
PRAGMA_NONNULL_IGNORED
address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
BasicType* in_sig_bt, int total_in_args,
BasicType* out_sig_bt, int total_out_args,
@ -398,4 +395,3 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
return blob->code_begin();
}
PRAGMA_DIAG_POP

View File

@ -33,14 +33,12 @@
#include "memory/universe.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/virtualizationSupport.hpp"
#include OS_HEADER_INLINE(os)
int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;

View File

@ -4107,37 +4107,43 @@ instruct scatter_masked(memory mem, vec src, vec idx, kReg mask, kReg ktmp, rReg
// ====================REPLICATE=======================================
// Replicate byte scalar to be vector
instruct ReplB_reg(vec dst, rRegI src) %{
instruct vReplB_reg(vec dst, rRegI src) %{
predicate(UseAVX >= 2);
match(Set dst (ReplicateB src));
format %{ "replicateB $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(this);
if (vlen == 64 || VM_Version::supports_avx512vlbw()) { // AVX512VL for <512bit operands
assert(VM_Version::supports_avx512bw(), "required"); // 512-bit byte vectors assume AVX512BW
int vlen_enc = vector_length_encoding(this);
__ evpbroadcastb($dst$$XMMRegister, $src$$Register, vlen_enc);
} else if (VM_Version::supports_avx2()) {
int vlen_enc = vector_length_encoding(this);
__ movdl($dst$$XMMRegister, $src$$Register);
__ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
__ movdl($dst$$XMMRegister, $src$$Register);
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
if (vlen >= 16) {
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
if (vlen >= 32) {
assert(vlen == 32, "sanity");
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
}
}
__ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplB_reg(vec dst, rRegI src) %{
predicate(UseAVX < 2);
match(Set dst (ReplicateB src));
format %{ "replicateB $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
__ movdl($dst$$XMMRegister, $src$$Register);
__ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
if (vlen >= 16) {
assert(vlen == 16, "");
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplB_mem(vec dst, memory mem) %{
predicate(VM_Version::supports_avx2());
predicate(UseAVX >= 2);
match(Set dst (ReplicateB (LoadB mem)));
format %{ "replicateB $dst,$mem" %}
ins_encode %{
@ -4147,48 +4153,45 @@ instruct ReplB_mem(vec dst, memory mem) %{
ins_pipe( pipe_slow );
%}
instruct ReplB_imm(vec dst, immI con) %{
match(Set dst (ReplicateB con));
format %{ "replicateB $dst,$con" %}
ins_encode %{
InternalAddress addr = $constantaddress(T_BYTE, vreplicate_imm(T_BYTE, $con$$constant, Matcher::vector_length(this)));
__ load_vector($dst$$XMMRegister, addr, Matcher::vector_length_in_bytes(this));
%}
ins_pipe( pipe_slow );
%}
// ====================ReplicateS=======================================
instruct ReplS_reg(vec dst, rRegI src) %{
instruct vReplS_reg(vec dst, rRegI src) %{
predicate(UseAVX >= 2);
match(Set dst (ReplicateS src));
format %{ "replicateS $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(this);
if (vlen == 32 || VM_Version::supports_avx512vlbw()) { // AVX512VL for <512bit operands
assert(VM_Version::supports_avx512bw(), "required"); // 512-bit short vectors assume AVX512BW
int vlen_enc = vector_length_encoding(this);
__ evpbroadcastw($dst$$XMMRegister, $src$$Register, vlen_enc);
} else if (VM_Version::supports_avx2()) {
int vlen_enc = vector_length_encoding(this);
__ movdl($dst$$XMMRegister, $src$$Register);
__ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
__ movdl($dst$$XMMRegister, $src$$Register);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
if (vlen >= 8) {
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
if (vlen >= 16) {
assert(vlen == 16, "sanity");
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
}
}
__ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplS_reg(vec dst, rRegI src) %{
predicate(UseAVX < 2);
match(Set dst (ReplicateS src));
format %{ "replicateS $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(this);
__ movdl($dst$$XMMRegister, $src$$Register);
__ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
if (vlen >= 8) {
assert(vlen == 8, "");
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplS_mem(vec dst, memory mem) %{
predicate(VM_Version::supports_avx2());
predicate(UseAVX >= 2);
match(Set dst (ReplicateS (LoadS mem)));
format %{ "replicateS $dst,$mem" %}
ins_encode %{
@ -4198,16 +4201,6 @@ instruct ReplS_mem(vec dst, memory mem) %{
ins_pipe( pipe_slow );
%}
instruct ReplS_imm(vec dst, immI con) %{
match(Set dst (ReplicateS con));
format %{ "replicateS $dst,$con" %}
ins_encode %{
InternalAddress addr = $constantaddress(T_SHORT, vreplicate_imm(T_SHORT, $con$$constant, Matcher::vector_length(this)));
__ load_vector($dst$$XMMRegister, addr, Matcher::vector_length_in_bytes(this));
%}
ins_pipe( pipe_slow );
%}
// ====================ReplicateI=======================================
instruct ReplI_reg(vec dst, rRegI src) %{
@ -4215,20 +4208,15 @@ instruct ReplI_reg(vec dst, rRegI src) %{
format %{ "replicateI $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(this);
if (vlen == 16 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
int vlen_enc = vector_length_encoding(this);
__ evpbroadcastd($dst$$XMMRegister, $src$$Register, vlen_enc);
} else if (VM_Version::supports_avx2()) {
int vlen_enc = vector_length_encoding(this);
__ movdl($dst$$XMMRegister, $src$$Register);
__ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
__ movdl($dst$$XMMRegister, $src$$Register);
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
if (vlen >= 8) {
assert(vlen == 8, "sanity");
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
}
}
%}
ins_pipe( pipe_slow );
@ -4238,25 +4226,32 @@ instruct ReplI_mem(vec dst, memory mem) %{
match(Set dst (ReplicateI (LoadI mem)));
format %{ "replicateI $dst,$mem" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen <= 4) {
int vlen_enc = vector_length_encoding(this);
if (VM_Version::supports_avx2()) {
__ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vlen_enc);
} else if (VM_Version::supports_avx()) {
__ vbroadcastss($dst$$XMMRegister, $mem$$Address, vlen_enc);
} else {
__ movdl($dst$$XMMRegister, $mem$$Address);
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
} else {
assert(VM_Version::supports_avx2(), "sanity");
int vlen_enc = vector_length_encoding(this);
__ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vlen_enc);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplI_imm(vec dst, immI con) %{
match(Set dst (ReplicateB con));
match(Set dst (ReplicateS con));
match(Set dst (ReplicateI con));
format %{ "replicateI $dst,$con" %}
ins_encode %{
InternalAddress addr = $constantaddress(T_INT, vreplicate_imm(T_INT, $con$$constant, Matcher::vector_length(this)));
__ load_vector($dst$$XMMRegister, addr, Matcher::vector_length_in_bytes(this));
InternalAddress addr = $constantaddress(Matcher::vector_element_basic_type(this),
vreplicate_imm(Matcher::vector_element_basic_type(this), $con$$constant,
(VM_Version::supports_sse3() ? (VM_Version::supports_avx() ? 4 : 8) : 8) /
type2aelembytes(Matcher::vector_element_basic_type(this))));
BasicType bt = Matcher::vector_element_basic_type(this);
int vlen = Matcher::vector_length_in_bytes(this);
__ load_constant_vector(bt, $dst$$XMMRegister, addr, vlen);
%}
ins_pipe( pipe_slow );
%}
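To make the element-count expression in ReplI_imm concrete, a small worked sketch; the helper below is illustrative and just restates the arithmetic fed to $constantaddress:

    // Number of scalar copies materialized in the constant pool before broadcasting:
    // AVX targets keep 4 bytes' worth, pre-AVX SSE targets keep 8 bytes' worth.
    static int pool_elements_sketch(BasicType bt) {
      int bytes = VM_Version::supports_sse3() ? (VM_Version::supports_avx() ? 4 : 8) : 8;
      return bytes / type2aelembytes(bt);      // e.g. T_INT with AVX: 4 / 4 = 1 element
    }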
@ -4268,23 +4263,21 @@ instruct ReplI_zero(vec dst, immI_0 zero) %{
match(Set dst (ReplicateI zero));
format %{ "replicateI $dst,$zero" %}
ins_encode %{
uint vsize = Matcher::vector_length_in_bytes(this);
if (vsize <= 16) {
__ pxor($dst$$XMMRegister, $dst$$XMMRegister);
} else {
int vlen_enc = vector_length_encoding(this);
int vlen_enc = vector_length_encoding(this);
if (VM_Version::supports_evex() && !VM_Version::supports_avx512vl()) {
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
__ pxor($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( fpu_reg_reg );
%}
instruct ReplI_M1(vec dst, immI_M1 con) %{
predicate(UseAVX > 0 && Matcher::vector_length_in_bytes(n) >= 16);
predicate(UseSSE >= 2);
match(Set dst (ReplicateB con));
match(Set dst (ReplicateS con));
match(Set dst (ReplicateI con));
effect(TEMP dst);
format %{ "vallones $dst" %}
ins_encode %{
int vector_len = vector_length_encoding(this);
@ -4301,23 +4294,16 @@ instruct ReplL_reg(vec dst, rRegL src) %{
match(Set dst (ReplicateL src));
format %{ "replicateL $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen == 2) {
__ movdq($dst$$XMMRegister, $src$$Register);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
} else if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
int vlen_enc = vector_length_encoding(this);
int vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(this);
if (vlen == 8 || VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands
__ evpbroadcastq($dst$$XMMRegister, $src$$Register, vlen_enc);
} else if (VM_Version::supports_avx2()) {
assert(vlen == 4, "sanity");
int vlen_enc = vector_length_encoding(this);
__ movdq($dst$$XMMRegister, $src$$Register);
__ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
assert(vlen == 4, "sanity");
__ movdq($dst$$XMMRegister, $src$$Register);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
__ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( pipe_slow );
@ -4382,14 +4368,14 @@ instruct ReplL_mem(vec dst, memory mem) %{
match(Set dst (ReplicateL (LoadL mem)));
format %{ "replicateL $dst,$mem" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen == 2) {
int vlen_enc = vector_length_encoding(this);
if (VM_Version::supports_avx2()) {
__ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vlen_enc);
} else if (VM_Version::supports_sse3()) {
__ movddup($dst$$XMMRegister, $mem$$Address);
} else {
__ movq($dst$$XMMRegister, $mem$$Address);
__ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
} else {
assert(VM_Version::supports_avx2(), "sanity");
int vlen_enc = vector_length_encoding(this);
__ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vlen_enc);
}
%}
ins_pipe( pipe_slow );
@ -4400,8 +4386,9 @@ instruct ReplL_imm(vec dst, immL con) %{
match(Set dst (ReplicateL con));
format %{ "replicateL $dst,$con" %}
ins_encode %{
InternalAddress addr = $constantaddress(T_LONG, vreplicate_imm(T_LONG, $con$$constant, Matcher::vector_length(this)));
__ load_vector($dst$$XMMRegister, addr, Matcher::vector_length_in_bytes(this));
InternalAddress addr = $constantaddress(T_LONG, vreplicate_imm(T_LONG, $con$$constant, 1));
int vlen = Matcher::vector_length_in_bytes(this);
__ load_constant_vector(T_LONG, $dst$$XMMRegister, addr, vlen);
%}
ins_pipe( pipe_slow );
%}
@ -4410,21 +4397,19 @@ instruct ReplL_zero(vec dst, immL0 zero) %{
match(Set dst (ReplicateL zero));
format %{ "replicateL $dst,$zero" %}
ins_encode %{
int vlen = Matcher::vector_length(this);
if (vlen == 2) {
__ pxor($dst$$XMMRegister, $dst$$XMMRegister);
} else {
int vlen_enc = vector_length_encoding(this);
int vlen_enc = vector_length_encoding(this);
if (VM_Version::supports_evex() && !VM_Version::supports_avx512vl()) {
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
__ pxor($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( fpu_reg_reg );
%}
instruct ReplL_M1(vec dst, immL_M1 con) %{
predicate(UseAVX > 0);
predicate(UseSSE >= 2);
match(Set dst (ReplicateL con));
effect(TEMP dst);
format %{ "vallones $dst" %}
ins_encode %{
int vector_len = vector_length_encoding(this);
@ -4435,38 +4420,43 @@ instruct ReplL_M1(vec dst, immL_M1 con) %{
// ====================ReplicateF=======================================
instruct ReplF_reg(vec dst, vlRegF src) %{
instruct vReplF_reg(vec dst, vlRegF src) %{
predicate(UseAVX > 0);
match(Set dst (ReplicateF src));
format %{ "replicateF $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
int vlen_enc = vector_length_encoding(this);
if (vlen <= 4) {
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
} else if (VM_Version::supports_avx2()) {
int vlen_enc = vector_length_encoding(this);
__ vpermilps($dst$$XMMRegister, $src$$XMMRegister, 0x00, Assembler::AVX_128bit);
} else if (VM_Version::supports_avx2()) {
__ vbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vlen_enc); // reg-to-reg variant requires AVX2
} else {
assert(vlen == 8, "sanity");
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
__ vpermilps($dst$$XMMRegister, $src$$XMMRegister, 0x00, Assembler::AVX_128bit);
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplF_reg(vec dst, vlRegF src) %{
predicate(UseAVX == 0);
match(Set dst (ReplicateF src));
format %{ "replicateF $dst,$src" %}
ins_encode %{
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
%}
ins_pipe( pipe_slow );
%}
instruct ReplF_mem(vec dst, memory mem) %{
predicate(UseAVX > 0);
match(Set dst (ReplicateF (LoadF mem)));
format %{ "replicateF $dst,$mem" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen <= 4) {
__ movdl($dst$$XMMRegister, $mem$$Address);
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
} else {
assert(VM_Version::supports_avx(), "sanity");
int vlen_enc = vector_length_encoding(this);
__ vbroadcastss($dst$$XMMRegister, $mem$$Address, vlen_enc);
}
int vlen_enc = vector_length_encoding(this);
__ vbroadcastss($dst$$XMMRegister, $mem$$Address, vlen_enc);
%}
ins_pipe( pipe_slow );
%}
@ -4476,8 +4466,10 @@ instruct ReplF_imm(vec dst, immF con) %{
match(Set dst (ReplicateF con));
format %{ "replicateF $dst,$con" %}
ins_encode %{
InternalAddress addr = $constantaddress(T_FLOAT, vreplicate_imm(T_FLOAT, $con$$constant, Matcher::vector_length(this)));
__ load_vector($dst$$XMMRegister, addr, Matcher::vector_length_in_bytes(this));
InternalAddress addr = $constantaddress(T_FLOAT, vreplicate_imm(T_FLOAT, $con$$constant,
VM_Version::supports_sse3() ? (VM_Version::supports_avx() ? 1 : 2) : 2));
int vlen = Matcher::vector_length_in_bytes(this);
__ load_constant_vector(T_FLOAT, $dst$$XMMRegister, addr, vlen);
%}
ins_pipe( pipe_slow );
%}
@ -4486,12 +4478,11 @@ instruct ReplF_zero(vec dst, immF0 zero) %{
match(Set dst (ReplicateF zero));
format %{ "replicateF $dst,$zero" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen <= 4) {
__ xorps($dst$$XMMRegister, $dst$$XMMRegister);
int vlen_enc = vector_length_encoding(this);
if (VM_Version::supports_evex() && !VM_Version::supports_avx512vldq()) {
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
int vlen_enc = vector_length_encoding(this);
__ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); // 512bit vxorps requires AVX512DQ
__ xorps($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( fpu_reg_reg );
@ -4500,37 +4491,46 @@ instruct ReplF_zero(vec dst, immF0 zero) %{
// ====================ReplicateD=======================================
// Replicate double (8 bytes) scalar to be vector
instruct ReplD_reg(vec dst, vlRegD src) %{
instruct vReplD_reg(vec dst, vlRegD src) %{
predicate(UseSSE >= 3);
match(Set dst (ReplicateD src));
format %{ "replicateD $dst,$src" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen == 2) {
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
int vlen_enc = vector_length_encoding(this);
if (vlen <= 2) {
__ movddup($dst$$XMMRegister, $src$$XMMRegister);
} else if (VM_Version::supports_avx2()) {
int vlen_enc = vector_length_encoding(this);
__ vbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vlen_enc); // reg-to-reg variant requires AVX2
} else {
assert(vlen == 4, "sanity");
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
__ movddup($dst$$XMMRegister, $src$$XMMRegister);
__ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( pipe_slow );
%}
instruct ReplD_reg(vec dst, vlRegD src) %{
predicate(UseSSE < 3);
match(Set dst (ReplicateD src));
format %{ "replicateD $dst,$src" %}
ins_encode %{
__ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
%}
ins_pipe( pipe_slow );
%}
instruct ReplD_mem(vec dst, memory mem) %{
predicate(UseSSE >= 3);
match(Set dst (ReplicateD (LoadD mem)));
format %{ "replicateD $dst,$mem" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen == 2) {
__ movq($dst$$XMMRegister, $mem$$Address);
__ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x44);
} else {
assert(VM_Version::supports_avx(), "sanity");
if (Matcher::vector_length(this) >= 4) {
int vlen_enc = vector_length_encoding(this);
__ vbroadcastsd($dst$$XMMRegister, $mem$$Address, vlen_enc);
} else {
__ movddup($dst$$XMMRegister, $mem$$Address);
}
%}
ins_pipe( pipe_slow );
@ -4541,8 +4541,9 @@ instruct ReplD_imm(vec dst, immD con) %{
match(Set dst (ReplicateD con));
format %{ "replicateD $dst,$con" %}
ins_encode %{
InternalAddress addr = $constantaddress(T_DOUBLE, vreplicate_imm(T_DOUBLE, $con$$constant, Matcher::vector_length(this)));
__ load_vector($dst$$XMMRegister, addr, Matcher::vector_length_in_bytes(this));
InternalAddress addr = $constantaddress(T_DOUBLE, vreplicate_imm(T_DOUBLE, $con$$constant, 1));
int vlen = Matcher::vector_length_in_bytes(this);
__ load_constant_vector(T_DOUBLE, $dst$$XMMRegister, addr, vlen);
%}
ins_pipe( pipe_slow );
%}
@ -4551,12 +4552,11 @@ instruct ReplD_zero(vec dst, immD0 zero) %{
match(Set dst (ReplicateD zero));
format %{ "replicateD $dst,$zero" %}
ins_encode %{
uint vlen = Matcher::vector_length(this);
if (vlen == 2) {
__ xorpd($dst$$XMMRegister, $dst$$XMMRegister);
int vlen_enc = vector_length_encoding(this);
if (VM_Version::supports_evex() && !VM_Version::supports_avx512vldq()) {
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc);
} else {
int vlen_enc = vector_length_encoding(this);
__ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); // 512bit vxorps requires AVX512DQ
__ xorps($dst$$XMMRegister, $dst$$XMMRegister);
}
%}
ins_pipe( fpu_reg_reg );
@ -8335,7 +8335,7 @@ instruct storeMask8B_avx(vec dst, vec src, immI_8 size, vec vtmp) %{
effect(TEMP_DEF dst, TEMP vtmp);
ins_encode %{
int vlen_enc = Assembler::AVX_128bit;
__ vpshufps($dst$$XMMRegister, $src$$XMMRegister, $src$$XMMRegister, 0x88, Assembler::AVX_256bit);
__ vshufps($dst$$XMMRegister, $src$$XMMRegister, $src$$XMMRegister, 0x88, Assembler::AVX_256bit);
__ vextracti128($vtmp$$XMMRegister, $dst$$XMMRegister, 0x1);
__ vblendps($dst$$XMMRegister, $dst$$XMMRegister, $vtmp$$XMMRegister, 0xC, vlen_enc);
__ vpxor($vtmp$$XMMRegister, $vtmp$$XMMRegister, $vtmp$$XMMRegister, vlen_enc);
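(Editorial note, an interpretation of the shuffle immediate, not part of the patch.) 0x88 is 0b10001000, i.e. the two-bit selectors {0, 2, 0, 2}, so the vshufps keeps the even-indexed 32-bit elements of each source within every 128-bit lane. A small C++ sketch of that selection:

#include <cstdint>

// Sketch only: per-128-bit-lane effect of a shufps-style shuffle with imm8 = 0x88.
static void shuffle_even_elements(const uint32_t a[4], const uint32_t b[4], uint32_t out[4]) {
  const uint8_t imm = 0x88;           // selectors 0, 2, 0, 2
  out[0] = a[(imm >> 0) & 3];         // a[0]
  out[1] = a[(imm >> 2) & 3];         // a[2]
  out[2] = b[(imm >> 4) & 3];         // b[0]
  out[3] = b[(imm >> 6) & 3];         // b[2]
}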

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "os_posix.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"

View File

@ -45,6 +45,7 @@
#include "misc_aix.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_posix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -26,12 +26,11 @@
#ifndef OS_AIX_OS_AIX_HPP
#define OS_AIX_OS_AIX_HPP
// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return false; }
#include "runtime/os.hpp"
// Class Aix defines the interface to the Aix operating systems.
class Aix {
class os::Aix {
friend class os;
private:
@ -177,6 +176,9 @@ class Aix {
// (on AIX, using libperfstat, on PASE with libo4.so).
// Returns true if ok, false if error.
static bool get_meminfo(meminfo_t* pmi);
static bool platform_print_native_stack(outputStream* st, void* context, char *buf, int buf_size);
static void* resolve_function_descriptor(void* p);
};
#endif // OS_AIX_OS_AIX_HPP

View File

@ -26,11 +26,16 @@
#ifndef OS_AIX_OS_AIX_INLINE_HPP
#define OS_AIX_OS_AIX_INLINE_HPP
// os_aix.hpp included by os.hpp
#include "os_aix.hpp"
#include "runtime/os.hpp"
#include "os_posix.inline.hpp"
// Information about the protection of the page at address '0' on this os.
inline bool os::zero_page_read_protected() {
return false;
}
inline bool os::uses_stack_guard_pages() {
return true;
}

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "os_posix.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,12 +25,11 @@
#ifndef OS_BSD_OS_BSD_HPP
#define OS_BSD_OS_BSD_HPP
#include "runtime/os.hpp"
// Bsd_OS defines the interface to Bsd operating systems
// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }
class Bsd {
class os::Bsd {
friend class os;
#ifdef __APPLE__

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,15 @@
#ifndef OS_BSD_OS_BSD_INLINE_HPP
#define OS_BSD_OS_BSD_INLINE_HPP
// os_bsd.hpp included by os.hpp
#include "os_bsd.hpp"
#include "runtime/os.hpp"
#include "os_posix.inline.hpp"
inline bool os::zero_page_read_protected() {
return true;
}
inline bool os::uses_stack_guard_pages() {
return true;
}

View File

@ -27,6 +27,7 @@
#include "memory/allocation.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "os_posix.hpp"
#include "services/attachListener.hpp"
#include <unistd.h>

View File

@ -30,6 +30,7 @@
#include "cgroupV2Subsystem_linux.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "os_linux.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "gc/z/zErrno.hpp"
#include "gc/z/zNUMA.hpp"
#include "gc/z/zSyscall_linux.hpp"
#include "os_linux.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"

View File

@ -32,6 +32,7 @@
#include "gc/z/zPhysicalMemoryBacking_linux.hpp"
#include "gc/z/zSyscall_linux.hpp"
#include "logging/log.hpp"
#include "os_linux.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.hpp"

View File

@ -28,6 +28,7 @@
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "logging/log.hpp"
#include "os_linux.hpp"
#include "osContainer_linux.hpp"
#include "cgroupSubsystem_linux.hpp"

View File

@ -4455,6 +4455,89 @@ void os::Linux::numa_init() {
}
}
#if defined(IA32) && !defined(ZERO)
/*
* Work-around (execute code at a high address) for broken NX emulation using CS limit,
* Red Hat patch "Exec-Shield" (IA32 only).
*
* Map and execute at a high VA to prevent CS lazy updates race with SMP MM
* invalidation. Further code generation by the JVM will no longer cause CS limit
* updates.
*
* Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
* @see JDK-8023956
*/
static void workaround_expand_exec_shield_cs_limit() {
assert(os::Linux::initial_thread_stack_bottom() != NULL, "sanity");
size_t page_size = os::vm_page_size();
/*
* JDK-8197429
*
* Expand the stack mapping to the end of the initial stack before
* attempting to install the codebuf. This is needed because newer
* Linux kernels impose a distance of a megabyte between stack
* memory and other memory regions. If we try to install the
* codebuf before expanding the stack the installation will appear
* to succeed but we'll get a segfault later if we expand the stack
* in Java code.
*
*/
if (os::is_primordial_thread()) {
address limit = os::Linux::initial_thread_stack_bottom();
if (! DisablePrimordialThreadGuardPages) {
limit += StackOverflow::stack_red_zone_size() +
StackOverflow::stack_yellow_zone_size();
}
os::Linux::expand_stack_to(limit);
}
/*
* Take the highest VA the OS will give us and exec
*
* Although using -(pagesz) as an mmap hint works on newer kernels as you would
* think, older variants affected by this work-around don't (search forward only).
*
* On the affected distributions, we understand the memory layout to be:
*
* TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
*
* A few pages south of the main stack will do it.
*
* If we are embedded in an app other than launcher (initial != main stack),
* we don't have much control or understanding of the address space, just let it slide.
*/
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
(StackOverflow::stack_guard_zone_size() + page_size));
char* codebuf = os::attempt_reserve_memory_at(hint, page_size);
if (codebuf == NULL) {
// JDK-8197429: There may be a stack gap of one megabyte between
// the limit of the stack and the nearest memory region: this is a
// Linux kernel workaround for CVE-2017-1000364. If we failed to
// map our codebuf, try again at an address one megabyte lower.
hint -= 1 * M;
codebuf = os::attempt_reserve_memory_at(hint, page_size);
}
if ((codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true))) {
return; // No matter, we tried, best effort.
}
MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
// Some code to exec: the 'ret' instruction
codebuf[0] = 0xC3;
// Call the code in the codebuf
__asm__ volatile("call *%0" : : "r"(codebuf));
// keep the page mapped so CS limit isn't reduced.
}
#endif // defined(IA32) && !defined(ZERO)
// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {

View File

@ -25,12 +25,11 @@
#ifndef OS_LINUX_OS_LINUX_HPP
#define OS_LINUX_OS_LINUX_HPP
// Linux_OS defines the interface to Linux operating systems
#include "runtime/os.hpp"
// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }
// os::Linux defines the interface to Linux operating systems
class Linux {
class os::Linux {
friend class CgroupSubsystem;
friend class os;
friend class OSContainer;
@ -157,6 +156,7 @@ class Linux {
// Stack overflow handling
static bool manually_expand_stack(JavaThread * t, address addr);
static void expand_stack_to(address bottom);
// fast POSIX clocks support
static void fast_thread_clock_init(void);
@ -198,7 +198,6 @@ class Linux {
private:
static void numa_init();
static void expand_stack_to(address bottom);
typedef int (*sched_getcpu_func_t)(void);
typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
@ -428,6 +427,8 @@ class Linux {
static const GrowableArray<int>* numa_nindex_to_node() {
return _nindex_to_node;
}
static void* resolve_function_descriptor(void* p);
};
#endif // OS_LINUX_OS_LINUX_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,11 +25,15 @@
#ifndef OS_LINUX_OS_LINUX_INLINE_HPP
#define OS_LINUX_OS_LINUX_INLINE_HPP
// os_linux.hpp included by os.hpp
#include "os_linux.hpp"
#include "runtime/os.hpp"
#include "os_posix.inline.hpp"
inline bool os::zero_page_read_protected() {
return true;
}
inline bool os::uses_stack_guard_pages() {
return true;
}

View File

@ -26,6 +26,7 @@
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "os_linux.inline.hpp"
#include "os_posix.hpp"
#include "runtime/os.hpp"
#include "runtime/os_perf.hpp"
#include "runtime/vm_version.hpp"

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2021 SAP SE. All rights reserved.
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "os_linux.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/ostream.hpp"

View File

@ -24,9 +24,7 @@
#include "jvm.h"
#ifdef LINUX
#include "classfile/classLoader.hpp"
#endif
#include "jvmtifiles/jvmti.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
@ -51,6 +49,9 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#ifdef LINUX
#include "os_linux.hpp"
#endif
#include <dirent.h>
#include <dlfcn.h>
@ -554,7 +555,7 @@ void os::Posix::print_umask(outputStream* st, mode_t umsk) {
st->print((umsk & S_IXOTH) ? "x" : "-");
}
void os::Posix::print_user_info(outputStream* st) {
void os::print_user_info(outputStream* st) {
unsigned id = (unsigned) ::getuid();
st->print("uid : %u ", id);
id = (unsigned) ::geteuid();
@ -568,13 +569,13 @@ void os::Posix::print_user_info(outputStream* st) {
mode_t umsk = ::umask(0);
::umask(umsk);
st->print("umask: %04o (", (unsigned) umsk);
print_umask(st, umsk);
os::Posix::print_umask(st, umsk);
st->print_cr(")");
st->cr();
}
// Print all active locale categories, one line each
void os::Posix::print_active_locale(outputStream* st) {
void os::print_active_locale(outputStream* st) {
st->print_cr("Active Locale:");
// Posix is quiet about how exactly LC_ALL is implemented.
// Just print it out too, in case LC_ALL is held separately
@ -1128,8 +1129,8 @@ bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address
"enabled executable stack (see man page execstack(8))");
} else {
#if !defined(AIX) && !defined(__APPLE__)
// bsd and aix don't have this
#ifdef LINUX
// This only works with os::Linux::manually_expand_stack()
// Accessing stack address below sp may cause SEGV if current
// thread has MAP_GROWSDOWN stack. This should only happen when
@ -1147,7 +1148,7 @@ bool os::Posix::handle_stack_overflow(JavaThread* thread, address addr, address
}
#else
tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
#endif // AIX or BSD
#endif // LINUX
}
return false;
}
@ -2010,3 +2011,8 @@ void os::die() {
::abort();
}
}
const char* os::file_separator() { return "/"; }
const char* os::line_separator() { return "\n"; }
const char* os::path_separator() { return ":"; }

View File

@ -25,6 +25,10 @@
#ifndef OS_POSIX_OS_POSIX_HPP
#define OS_POSIX_OS_POSIX_HPP
#include "runtime/os.hpp"
#include <errno.h>
// Note: the Posix API aims to capture functionality available on all Posix
// compliant platforms, but in practice the implementations may depend on
// non-Posix functionality. For example, the use of lseek64 and ftruncate64.
@ -34,12 +38,19 @@
// behaviour in API's that are defined by Posix. For example, that SIGSTKSZ
// is not defined as a constant as of Glibc 2.34.
// File conventions
static const char* file_separator() { return "/"; }
static const char* line_separator() { return "\n"; }
static const char* path_separator() { return ":"; }
// macros for restartable system calls
class Posix {
#define RESTARTABLE(_cmd, _result) do { \
_result = _cmd; \
} while(((int)_result == OS_ERR) && (errno == EINTR))
#define RESTARTABLE_RETURN_INT(_cmd) do { \
int _result; \
RESTARTABLE(_cmd, _result); \
return _result; \
} while(false)
class os::Posix {
friend class os;
protected:
@ -81,8 +92,6 @@ public:
static void print_umask(outputStream* st, mode_t umsk);
static void print_user_info(outputStream* st);
// Set PC into context. Needed for continuation after signal.
static address ucontext_get_pc(const ucontext_t* ctx);
static void ucontext_set_pc(ucontext_t* ctx, address pc);
@ -92,8 +101,6 @@ public:
static bool handle_stack_overflow(JavaThread* thread, address addr, address pc,
const void* ucVoid,
address* stub);
static void print_active_locale(outputStream* st);
};
#endif // OS_POSIX_OS_POSIX_HPP
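(Editorial note, not part of the patch.) The RESTARTABLE macros that moved into os_posix.hpp above simply retry a system call while it fails with EINTR. A self-contained, hypothetical usage sketch; fd, buf and len are illustrative, and OS_ERR is redefined locally to stand in for HotSpot's constant:

#include <cerrno>
#include <cstddef>
#include <unistd.h>

#define OS_ERR (-1)                       // stand-in for HotSpot's OS_ERR
#define RESTARTABLE(_cmd, _result) do { \
  _result = _cmd; \
} while(((int)_result == OS_ERR) && (errno == EINTR))

// Retry a read() that may be interrupted by a signal.
static ssize_t read_restartable(int fd, void* buf, size_t len) {
  ssize_t n;
  RESTARTABLE(::read(fd, buf, len), n);
  return n;
}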

View File

@ -25,7 +25,7 @@
#ifndef OS_POSIX_OS_POSIX_INLINE_HPP
#define OS_POSIX_OS_POSIX_INLINE_HPP
// os_posix.hpp included by os.hpp
#include "os_posix.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
@ -34,18 +34,6 @@
#include <sys/socket.h>
#include <netdb.h>
// macros for restartable system calls
#define RESTARTABLE(_cmd, _result) do { \
_result = _cmd; \
} while(((int)_result == OS_ERR) && (errno == EINTR))
#define RESTARTABLE_RETURN_INT(_cmd) do { \
int _result; \
RESTARTABLE(_cmd, _result); \
return _result; \
} while(false)
// Aix does not have NUMA support but need these for compilation.
inline bool os::numa_has_static_binding() { AIX_ONLY(ShouldNotReachHere();) return true; }
inline bool os::numa_has_group_homing() { AIX_ONLY(ShouldNotReachHere();) return false; }

View File

@ -37,6 +37,9 @@
#include "runtime/perfMemory.hpp"
#include "services/memTracker.hpp"
#include "utilities/exceptions.hpp"
#if defined(LINUX)
#include "os_linux.hpp"
#endif
// put OS-includes here
# include <sys/types.h>

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "os_posix.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.hpp"
#include "utilities/globalDefinitions.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
#include "precompiled.hpp"
#ifndef __APPLE__
#include "os_posix.hpp"
#include "runtime/os.hpp"
// POSIX unnamed semaphores are not supported on OS X.
#include "semaphore_posix.hpp"

View File

@ -29,6 +29,7 @@
#include "code/compiledMethod.hpp"
#include "code/nativeInst.hpp"
#include "logging/log.hpp"
#include "os_posix.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "cds/metaspaceShared.hpp"
#include "os_posix.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
#include "runtime/safefetch.hpp"

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "iphlp_interface.hpp"
#include "os_windows.hpp"
#include "runtime/os.hpp"
// IPHLP API

View File

@ -5938,3 +5938,16 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
}
}
}
// File conventions
const char* os::file_separator() { return "\\"; }
const char* os::line_separator() { return "\r\n"; }
const char* os::path_separator() { return ";"; }
void os::print_user_info(outputStream* st) {
// not implemented yet
}
void os::print_active_locale(outputStream* st) {
// not implemented yet
}

View File

@ -24,25 +24,17 @@
#ifndef OS_WINDOWS_OS_WINDOWS_HPP
#define OS_WINDOWS_OS_WINDOWS_HPP
#include "runtime/os.hpp"
// Win32_OS defines the interface to windows operating systems
// strtok_s is the Windows thread-safe equivalent of POSIX strtok_r
#define strtok_r strtok_s
class outputStream;
class Thread;
#define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO)
// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }
// File conventions
static const char* file_separator() { return "\\"; }
static const char* line_separator() { return "\r\n"; }
static const char* path_separator() { return ";"; }
class win32 {
class os::win32 {
friend class os;
friend unsigned __stdcall thread_native_entry(class Thread*);
friend unsigned __stdcall thread_native_entry(Thread*);
protected:
static int _vm_page_size;
@ -56,6 +48,11 @@ class win32 {
static void print_windows_version(outputStream* st);
static void print_uptime_info(outputStream* st);
static bool platform_print_native_stack(outputStream* st, const void* context,
char *buf, int buf_size);
static bool register_code_area(char *low, char *high);
public:
// Windows-specific interface:
static void initialize_system_info();

View File

@ -25,12 +25,16 @@
#ifndef OS_WINDOWS_OS_WINDOWS_INLINE_HPP
#define OS_WINDOWS_OS_WINDOWS_INLINE_HPP
// os_windows.hpp included by os.hpp
#include "os_windows.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutex.hpp"
#include "runtime/os.hpp"
inline bool os::zero_page_read_protected() {
return true;
}
inline bool os::uses_stack_guard_pages() {
return true;
}

View File

@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "os_windows.hpp"
#include "pdh_interface.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

View File

@ -34,6 +34,8 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_ppc.hpp"
#include "os_aix.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "porting_aix.hpp"
@ -508,12 +510,14 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
bool os::platform_print_native_stack(outputStream* st, void* context, char *buf, int buf_size) {
bool os::Aix::platform_print_native_stack(outputStream* st, void* context, char *buf, int buf_size) {
AixNativeCallstack::print_callstack_for_context(st, (const ucontext_t*)context, true, buf, (size_t) buf_size);
return true;
}
// HAVE_FUNCTION_DESCRIPTORS
void* os::resolve_function_descriptor(void* p) {
void* os::Aix::resolve_function_descriptor(void* p) {
return ((const FunctionDescriptor*)p)->entry();
}
void os::setup_fpu() {}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -23,20 +23,19 @@
*
*/
#ifndef OS_CPU_AIX_PPC_OS_AIX_PPC_HPP
#define OS_CPU_AIX_PPC_OS_AIX_PPC_HPP
#ifndef OS_CPU_AIX_PPC_OS_AIX_PPC_INLINE_HPP
#define OS_CPU_AIX_PPC_OS_AIX_PPC_INLINE_HPP
static void setup_fpu() {}
#include "os_aix.hpp"
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#define HAVE_PLATFORM_PRINT_NATIVE_STACK 1
inline bool os::platform_print_native_stack(outputStream* st, const void* context,
char *buf, int buf_size) {
return os::Aix::platform_print_native_stack(st, context, buf, buf_size);
}
#define PLATFORM_PRINT_NATIVE_STACK 1
static bool platform_print_native_stack(outputStream* st, void* context,
char *buf, int buf_size);
#define HAVE_FUNCTION_DESCRIPTORS 1
static void* resolve_function_descriptor(void* p);
#endif // OS_CPU_AIX_PPC_OS_AIX_PPC_HPP
#define HAVE_FUNCTION_DESCRIPTORS 1
inline void* os::resolve_function_descriptor(void* p) {
return os::Aix::resolve_function_descriptor(p);
}
#endif // OS_CPU_AIX_PPC_OS_AIX_PPC_INLINE_HPP
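(Editorial note, an ABI assumption rather than something taken from the patch.) On AIX and big-endian PPC64 ELFv1, a "function pointer" designates a descriptor rather than code, and resolve_function_descriptor() returns its entry word. A rough sketch of the layout; the struct name is invented for illustration:

// Sketch only: approximate layout of an AIX / PPC64 ELFv1 function descriptor.
struct FunctionDescriptorSketch {
  void* entry;   // address of the function's first instruction
  void* toc;     // TOC/GOT base value for the callee
  void* env;     // environment pointer (unused by C/C++)
};

// What resolve_function_descriptor() conceptually does.
static void* resolve_descriptor(void* p) {
  return static_cast<FunctionDescriptorSketch*>(p)->entry;
}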

View File

@ -35,6 +35,8 @@
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_bsd.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
@ -337,10 +339,6 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
void os::Bsd::init_thread_fpu_state(void) {
}
bool os::is_allocatable(size_t bytes) {
return true;
}
////////////////////////////////////////////////////////////////////////////////
// thread stack

View File

@ -1,6 +1,5 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,13 +22,7 @@
*
*/
#ifndef OS_CPU_BSD_ZERO_OS_BSD_ZERO_HPP
#define OS_CPU_BSD_ZERO_OS_BSD_ZERO_HPP
#ifndef OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_INLINE_HPP
#define OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_INLINE_HPP
static void setup_fpu() {}
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#endif // OS_CPU_BSD_ZERO_OS_BSD_ZERO_HPP
#endif // OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_INLINE_HPP

View File

@ -32,6 +32,8 @@
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_bsd.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,16 @@
#include "runtime/os.hpp"
#if defined(__APPLE__) && defined(COMPATIBLE_CDS_ALIGNMENT)
#define HAVE_CDS_CORE_REGION_ALIGNMENT 1
inline size_t os::cds_core_region_alignment() {
// Core region alignment is 16K to be able to run binaries built on MacOS x64
// on MacOS aarch64.
return (16*K);
}
#endif
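(Editorial note, not part of the patch.) cds_core_region_alignment() lets a platform request a coarser alignment for CDS core regions than its default page size; 16K here keeps archives mappable on both 4K-page x64 and 16K-page Apple Silicon kernels. A hedged sketch of how such an alignment is typically applied:

#include <cstddef>

// Sketch only: round a value up to a power-of-two alignment such as 16 * 1024.
static size_t align_up_to(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}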
// See http://www.technovelty.org/code/c/reading-rdtsc.htl for details
inline jlong os::rdtsc() {
#ifndef AMD64

View File

@ -38,6 +38,8 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_zero.hpp"
#include "os_bsd.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
@ -360,3 +362,5 @@ void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
}
#endif
void os::setup_fpu() {}

View File

@ -0,0 +1,28 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_BSD_ZERO_OS_BSD_ZERO_INLINE_HPP
#define OS_CPU_BSD_ZERO_OS_BSD_ZERO_INLINE_HPP
#endif // OS_CPU_BSD_ZERO_OS_BSD_ZERO_INLINE_HPP

View File

@ -33,6 +33,8 @@
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"

View File

@ -1,39 +0,0 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_HPP
#if defined(COMPATIBLE_CDS_ALIGNMENT)
#define CDS_CORE_REGION_ALIGNMENT (64*K)
#endif
static void setup_fpu();
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#endif // OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_HPP

View File

@ -1,6 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,12 +22,16 @@
*
*/
#ifndef OS_CPU_LINUX_S390_OS_LINUX_S390_HPP
#define OS_CPU_LINUX_S390_OS_LINUX_S390_HPP
#ifndef OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_INLINE_HPP
#define OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_INLINE_HPP
static void setup_fpu() {}
#include "runtime/os.hpp"
// Used to register dynamic code cache area with the OS.
static bool register_code_area(char *low, char *high) { return true; }
#if defined(COMPATIBLE_CDS_ALIGNMENT)
#define HAVE_CDS_CORE_REGION_ALIGNMENT 1
inline size_t os::cds_core_region_alignment() {
return (64*K);
}
#endif
#endif // OS_CPU_LINUX_S390_OS_LINUX_S390_HPP
#endif // OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_INLINE_HPP

View File

@ -31,6 +31,8 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_arm.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,13 +22,8 @@
*
*/
#ifndef OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP
#ifndef OS_CPU_LINUX_ARM_OS_LINUX_ARM_INLINE_HPP
#define OS_CPU_LINUX_ARM_OS_LINUX_ARM_INLINE_HPP
static void setup_fpu();
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#endif // OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP
#endif // OS_CPU_LINUX_ARM_OS_LINUX_ARM_INLINE_HPP

View File

@ -34,6 +34,8 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_ppc.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
@ -43,6 +45,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
@ -515,7 +518,9 @@ int os::extra_bang_size_in_bytes() {
}
#ifdef HAVE_FUNCTION_DESCRIPTORS
void* os::resolve_function_descriptor(void* p) {
void* os::Linux::resolve_function_descriptor(void* p) {
return ((const FunctionDescriptor*)p)->entry();
}
#endif
void os::setup_fpu() {}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -23,19 +23,17 @@
*
*/
#ifndef OS_CPU_LINUX_PPC_OS_LINUX_PPC_HPP
#define OS_CPU_LINUX_PPC_OS_LINUX_PPC_HPP
#ifndef OS_CPU_LINUX_PPC_OS_LINUX_PPC_INLINE_HPP
#define OS_CPU_LINUX_PPC_OS_LINUX_PPC_INLINE_HPP
static void setup_fpu() {}
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
#include "os_linux.hpp"
#if !defined(ABI_ELFv2)
// ppc (not ppcle) has function descriptors
#define HAVE_FUNCTION_DESCRIPTORS 1
static void* resolve_function_descriptor(void* p);
#define HAVE_FUNCTION_DESCRIPTORS 1
inline void* os::resolve_function_descriptor(void* p) {
return os::Linux::resolve_function_descriptor(p);
}
#endif
#endif // OS_CPU_LINUX_PPC_OS_LINUX_PPC_HPP
#endif // OS_CPU_LINUX_PPC_OS_LINUX_PPC_INLINE_HPP

View File

@ -54,10 +54,6 @@ inline void OrderAccess::fence() {
}
inline void OrderAccess::cross_modify_fence_impl() {
asm volatile("fence.i" : : : "memory");
if (UseConservativeFence) {
asm volatile("fence ir, ir" : : : "memory");
}
}
#endif // OS_CPU_LINUX_RISCV_ORDERACCESS_LINUX_RISCV_HPP

View File

@ -33,6 +33,8 @@
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"

View File

@ -1,54 +0,0 @@
/*
* Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_RISCV_VM_OS_LINUX_RISCV_HPP
#define OS_CPU_LINUX_RISCV_VM_OS_LINUX_RISCV_HPP
static void setup_fpu();
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
// SYSCALL_RISCV_FLUSH_ICACHE is used to flush instruction cache. The "fence.i" instruction
// only work on the current hart, so kernel provides the icache flush syscall to flush icache
// on each hart. You can pass a flag to determine a global or local icache flush.
static void icache_flush(long int start, long int end)
{
const int SYSCALL_RISCV_FLUSH_ICACHE = 259;
register long int __a7 asm ("a7") = SYSCALL_RISCV_FLUSH_ICACHE;
register long int __a0 asm ("a0") = start;
register long int __a1 asm ("a1") = end;
// the flush can be applied to either all threads or only the current.
// 0 means a global icache flush, and the icache flush will be applied
// to other harts concurrently executing.
register long int __a2 asm ("a2") = 0;
__asm__ volatile ("ecall\n\t"
: "+r" (__a0)
: "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a7)
: "memory");
}
#endif // OS_CPU_LINUX_RISCV_VM_OS_LINUX_RISCV_HPP

View File

@ -0,0 +1,28 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_RISCV_OS_LINUX_RISCV_INLINE_HPP
#define OS_CPU_LINUX_RISCV_OS_LINUX_RISCV_INLINE_HPP
#endif // OS_CPU_LINUX_RISCV_OS_LINUX_RISCV_INLINE_HPP

View File

@ -36,6 +36,8 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_s390.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
@ -477,3 +479,5 @@ int os::extra_bang_size_in_bytes() {
// z/Architecture does not require the additional stack bang.
return 0;
}
void os::setup_fpu() {}

View File

@ -0,0 +1,28 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_S390_OS_LINUX_S390_INLINE_HPP
#define OS_CPU_LINUX_S390_OS_LINUX_S390_INLINE_HPP
#endif // OS_CPU_LINUX_S390_OS_LINUX_S390_INLINE_HPP

View File

@ -32,6 +32,8 @@
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/frame.inline.hpp"
@ -651,83 +653,6 @@ void os::verify_stack_alignment() {
}
#endif
/*
* IA32 only: execute code at a high address in case buggy NX emulation is present. I.e. avoid CS limit
* updates (JDK-8023956).
*/
void os::workaround_expand_exec_shield_cs_limit() {
#if defined(IA32)
assert(Linux::initial_thread_stack_bottom() != NULL, "sanity");
size_t page_size = os::vm_page_size();
/*
* JDK-8197429
*
* Expand the stack mapping to the end of the initial stack before
* attempting to install the codebuf. This is needed because newer
* Linux kernels impose a distance of a megabyte between stack
* memory and other memory regions. If we try to install the
* codebuf before expanding the stack the installation will appear
* to succeed but we'll get a segfault later if we expand the stack
* in Java code.
*
*/
if (os::is_primordial_thread()) {
address limit = Linux::initial_thread_stack_bottom();
if (! DisablePrimordialThreadGuardPages) {
limit += StackOverflow::stack_red_zone_size() +
StackOverflow::stack_yellow_zone_size();
}
os::Linux::expand_stack_to(limit);
}
/*
* Take the highest VA the OS will give us and exec
*
* Although using -(pagesz) as mmap hint works on newer kernel as you would
* think, older variants affected by this work-around don't (search forward only).
*
* On the affected distributions, we understand the memory layout to be:
*
* TASK_LIMIT= 3G, main stack base close to TASK_LIMT.
*
* A few pages south main stack will do it.
*
* If we are embedded in an app other than launcher (initial != main stack),
* we don't have much control or understanding of the address space, just let it slide.
*/
char* hint = (char*)(Linux::initial_thread_stack_bottom() -
(StackOverflow::stack_guard_zone_size() + page_size));
char* codebuf = os::attempt_reserve_memory_at(hint, page_size);
if (codebuf == NULL) {
// JDK-8197429: There may be a stack gap of one megabyte between
// the limit of the stack and the nearest memory region: this is a
// Linux kernel workaround for CVE-2017-1000364. If we failed to
// map our codebuf, try again at an address one megabyte lower.
hint -= 1 * M;
codebuf = os::attempt_reserve_memory_at(hint, page_size);
}
if ((codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true))) {
return; // No matter, we tried, best effort.
}
MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
// Some code to exec: the 'ret' instruction
codebuf[0] = 0xC3;
// Call the code in the codebuf
__asm__ volatile("call *%0" : : "r"(codebuf));
// keep the page mapped so CS limit isn't reduced.
#endif
}
int os::extra_bang_size_in_bytes() {
// JDK-8050147 requires the full cache line bang for x86.
return VM_Version::L1_line_size();

View File

@ -1,51 +0,0 @@
/*
* Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_X86_OS_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_OS_LINUX_X86_HPP
static void setup_fpu();
static bool supports_sse();
static juint cpu_microcode_revision();
static jlong rdtsc();
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
/*
* Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
* (IA32 only).
*
* Map and execute at a high VA to prevent CS lazy updates race with SMP MM
* invalidation.Further code generation by the JVM will no longer cause CS limit
* updates.
*
* Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
* @see JDK-8023956
*/
static void workaround_expand_exec_shield_cs_limit();
#endif // OS_CPU_LINUX_X86_OS_LINUX_X86_HPP

View File

@ -33,6 +33,8 @@
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "nativeInst_zero.hpp"
#include "os_linux.hpp"
#include "os_posix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
@ -398,3 +400,5 @@ int os::extra_bang_size_in_bytes() {
// Zero does not require an additional stack banging.
return 0;
}
void os::setup_fpu() {}

View File

@ -1,48 +0,0 @@
/*
* Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010, 2018, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP
static void setup_fpu() {}
// Used to register dynamic code cache area with the OS
// Note: Currently only used in 64 bit Windows implementations
static bool register_code_area(char *low, char *high) { return true; }
/*
* Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
* (IA32 only).
*
* Map and execute at a high VA to prevent CS lazy updates race with SMP MM
* invalidation.Further code generation by the JVM will no longer cause CS limit
* updates.
*
* Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
* @see JDK-8023956
*/
static void workaround_expand_exec_shield_cs_limit();
#endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Microsoft Corporation. All rights reserved.
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,15 +22,9 @@
*
*/
#ifndef OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_HPP
#define OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_HPP
#ifndef OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_INLINE_HPP
#define OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_INLINE_HPP
static void setup_fpu();
static bool supports_sse();
static bool register_code_area(char *low, char *high) {
// Using Vectored Exception Handling
return true;
}
#endif // OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_HPP
#endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_INLINE_HPP

View File

@ -33,6 +33,7 @@
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "os_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"

View File

@ -27,4 +27,9 @@
#include "runtime/os.hpp"
inline bool os::register_code_area(char *low, char *high) {
// Using Vectored Exception Handling
return true;
}
#endif // OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_INLINE_HPP

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "os_windows.hpp"
#include "runtime/os.hpp"
void MacroAssembler::int3() {

View File

@ -32,6 +32,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "os_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
@ -41,6 +42,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
@ -165,7 +167,7 @@ typedef struct {
// Arguments: low and high are the address of the full reserved
// codeCache area
//
bool os::register_code_area(char *low, char *high) {
bool os::win32::register_code_area(char *low, char *high) {
#ifdef AMD64
ResourceMark rm;
@ -209,7 +211,7 @@ bool os::register_code_area(char *low, char *high) {
return true;
}
#ifdef AMD64
#ifdef HAVE_PLATFORM_PRINT_NATIVE_STACK
/*
* Windows/x64 does not use stack frames the way expected by Java:
* [1] in most cases, there is no frame pointer. All locals are addressed via RSP
@ -221,8 +223,8 @@ bool os::register_code_area(char *low, char *high) {
* while (...) {... fr = os::get_sender_for_C_frame(&fr); }
* loop in vmError.cpp. We need to roll our own loop.
*/
bool os::platform_print_native_stack(outputStream* st, const void* context,
char *buf, int buf_size)
bool os::win32::platform_print_native_stack(outputStream* st, const void* context,
char *buf, int buf_size)
{
CONTEXT ctx;
if (context != NULL) {
@ -293,7 +295,7 @@ bool os::platform_print_native_stack(outputStream* st, const void* context,
return true;
}
#endif // AMD64
#endif // HAVE_PLATFORM_PRINT_NATIVE_STACK
address os::fetch_frame_from_context(const void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
@ -558,3 +560,7 @@ int os::extra_bang_size_in_bytes() {
// JDK-8050147 requires the full cache line bang for x86.
return VM_Version::L1_line_size();
}
bool os::supports_sse() {
return true;
}

View File

@ -1,46 +0,0 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef OS_CPU_WINDOWS_X86_OS_WINDOWS_X86_HPP
#define OS_CPU_WINDOWS_X86_OS_WINDOWS_X86_HPP
//
// NOTE: we are back in class os here, not win32
//
static void setup_fpu();
static bool supports_sse() { return true; }
static juint cpu_microcode_revision();
static jlong rdtsc();
static bool register_code_area(char *low, char *high);
#ifdef AMD64
#define PLATFORM_PRINT_NATIVE_STACK 1
static bool platform_print_native_stack(outputStream* st, const void* context,
char *buf, int buf_size);
#endif
#endif // OS_CPU_WINDOWS_X86_OS_WINDOWS_X86_HPP

Some files were not shown because too many files have changed in this diff.