8166651: OrderAccess::load_acquire &etc should have const parameters

Added const qualifiers to load/load_acquire source parameters.

Reviewed-by: dholmes, coleenp, adinn, eosterlund
Author: Kim Barrett
Date:   2017-06-05 19:07:47 -04:00
parent 451b8b5d32
commit 5ea9bb3e56
29 changed files with 128 additions and 132 deletions
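
For readers skimming the diff, the practical effect of the const qualifiers is that plain and acquire loads can be issued through pointers to const data, for example from const member functions, without casting the const away. The sketch below is illustrative only, written in standard C++ in HotSpot's volatile-plus-barrier style; load_acquire_sketch and Example are invented names, not HotSpot APIs.

#include <atomic>

typedef int jint;  // assumption: jint modeled as a plain int for this sketch

// Not HotSpot's implementation: a stand-in that shows only the parameter type.
// Taking "const volatile jint*" lets callers pass pointers to const data.
inline jint load_acquire_sketch(const volatile jint* p) {
  jint v = *p;                                          // read the value
  std::atomic_thread_fence(std::memory_order_acquire);  // acquire barrier after the load
  return v;
}

struct Example {
  volatile jint _state;
  // Inside a const member function, &_state has type "const volatile jint*";
  // only a const-qualified overload accepts it without a const_cast.
  jint state_acquire() const { return load_acquire_sketch(&_state); }
};

With the pre-change signatures, which took only volatile T*, the call in state_acquire() would not compile without a cast.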

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -46,7 +46,7 @@ inline void Atomic::store (jlong store_value, volatile jlong* dest) { *
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
//
// machine barrier instructions:

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -78,10 +78,10 @@ inline void OrderAccess::acquire() { inlasm_lwsync(); }
inline void OrderAccess::release() { inlasm_lwsync(); }
inline void OrderAccess::fence() { inlasm_sync(); }
-template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jint OrderAccess::specialized_load_acquire<jint> (volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jint OrderAccess::specialized_load_acquire<jint> (const volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; }
#undef inlasm_sync
#undef inlasm_lwsync

View File

@@ -153,7 +153,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64
@@ -181,7 +181,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
extern "C" {
// defined in bsd_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
@@ -196,7 +196,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}
-inline jlong Atomic::load(volatile jlong* src) {
+inline jlong Atomic::load(const volatile jlong* src) {
volatile jlong dest;
_Atomic_move_long(src, &dest);
return dest;

View File

@@ -1,5 +1,5 @@
#
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -659,7 +659,7 @@ SYMBOL(_Atomic_cmpxchg_long):
# Support for jlong Atomic::load and Atomic::store.
-# void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+# void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
.p2align 4,,15
ELF_TYPE(_Atomic_move_long,@function)
SYMBOL(_Atomic_move_long):

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -316,7 +316,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value,
order);
}
-inline jlong Atomic::load(volatile jlong* src) {
+inline jlong Atomic::load(const volatile jlong* src) {
volatile jlong dest;
os::atomic_copy64(src, &dest);
return dest;

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -35,7 +35,7 @@
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
-static void atomic_copy64(volatile void *src, volatile void *dst) {
+static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32)
double tmp;
asm volatile ("lfd %0, 0(%1)\n"
@@ -49,7 +49,7 @@
: "=r"(tmp)
: "a"(src), "a"(dst));
#else
-*(jlong *) dst = *(jlong *) src;
+*(jlong *) dst = *(const jlong *) src;
#endif
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -157,6 +157,6 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void
order);
}
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -50,30 +50,28 @@ inline void OrderAccess::fence() {
FULL_MEM_BARRIER;
}
-inline jbyte OrderAccess::load_acquire(volatile jbyte* p)
+inline jbyte OrderAccess::load_acquire(const volatile jbyte* p)
{ jbyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jshort OrderAccess::load_acquire(volatile jshort* p)
+inline jshort OrderAccess::load_acquire(const volatile jshort* p)
{ jshort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jint OrderAccess::load_acquire(volatile jint* p)
+inline jint OrderAccess::load_acquire(const volatile jint* p)
{ jint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jlong OrderAccess::load_acquire(volatile jlong* p)
+inline jlong OrderAccess::load_acquire(const volatile jlong* p)
{ jlong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jubyte OrderAccess::load_acquire(volatile jubyte* p)
+inline jubyte OrderAccess::load_acquire(const volatile jubyte* p)
{ jubyte data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jushort OrderAccess::load_acquire(volatile jushort* p)
+inline jushort OrderAccess::load_acquire(const volatile jushort* p)
{ jushort data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline juint OrderAccess::load_acquire(volatile juint* p)
+inline juint OrderAccess::load_acquire(const volatile juint* p)
{ juint data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline julong OrderAccess::load_acquire(volatile julong* p)
+inline julong OrderAccess::load_acquire(const volatile julong* p)
{ julong data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jfloat OrderAccess::load_acquire(volatile jfloat* p)
+inline jfloat OrderAccess::load_acquire(const volatile jfloat* p)
{ jfloat data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p)
+inline jdouble OrderAccess::load_acquire(const volatile jdouble* p)
{ jdouble data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p)
+inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p)
{ intptr_t data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-inline void* OrderAccess::load_ptr_acquire(volatile void* p)
-{ void* data; __atomic_load((void* volatile *)p, &data, __ATOMIC_ACQUIRE); return data; }
+inline void* OrderAccess::load_ptr_acquire(const volatile void* p)
+{ void* data; __atomic_load((void* const volatile *)p, &data, __ATOMIC_ACQUIRE); return data; }

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -38,8 +38,8 @@
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
-static void atomic_copy64(volatile void *src, volatile void *dst) {
-*(jlong *) dst = *(jlong *) src;
+static void atomic_copy64(const volatile void *src, volatile void *dst) {
+*(jlong *) dst = *(const jlong *) src;
}
#endif // OS_CPU_LINUX_AARCH64_VM_OS_LINUX_AARCH64_HPP

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,7 @@ inline void Atomic::store (jint store_value, volatile jint* dest) { *
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
-inline jlong Atomic::load (volatile jlong* src) {
+inline jlong Atomic::load (const volatile jlong* src) {
assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned");
#ifdef AARCH64
return *src;

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -131,7 +131,7 @@ inline void OrderAccess::fence() { dmb_sy(); }
#ifdef AARCH64
-template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte>(volatile jbyte* p) {
+template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte>(const volatile jbyte* p) {
volatile jbyte result;
__asm__ volatile(
"ldarb %w[res], [%[ptr]]"
@@ -141,7 +141,7 @@ template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte>(volatile
return result;
}
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) {
+template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) {
volatile jshort result;
__asm__ volatile(
"ldarh %w[res], [%[ptr]]"
@@ -151,7 +151,7 @@ template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatil
return result;
}
-template<> inline jint OrderAccess::specialized_load_acquire<jint>(volatile jint* p) {
+template<> inline jint OrderAccess::specialized_load_acquire<jint>(const volatile jint* p) {
volatile jint result;
__asm__ volatile(
"ldar %w[res], [%[ptr]]"
@@ -161,16 +161,16 @@ template<> inline jint OrderAccess::specialized_load_acquire<jint>(volatile
return result;
}
-template<> inline jfloat OrderAccess::specialized_load_acquire<jfloat>(volatile jfloat* p) {
-return jfloat_cast(specialized_load_acquire((volatile jint*)p));
+template<> inline jfloat OrderAccess::specialized_load_acquire<jfloat>(const volatile jfloat* p) {
+return jfloat_cast(specialized_load_acquire((const volatile jint*)p));
}
// This is implicit as jlong and intptr_t are both "long int"
-//template<> inline jlong OrderAccess::specialized_load_acquire(volatile jlong* p) {
-// return (volatile jlong)specialized_load_acquire((volatile intptr_t*)p);
+//template<> inline jlong OrderAccess::specialized_load_acquire(const volatile jlong* p) {
+// return (volatile jlong)specialized_load_acquire((const volatile intptr_t*)p);
//}
-template<> inline intptr_t OrderAccess::specialized_load_acquire<intptr_t>(volatile intptr_t* p) {
+template<> inline intptr_t OrderAccess::specialized_load_acquire<intptr_t>(const volatile intptr_t* p) {
volatile intptr_t result;
__asm__ volatile(
"ldar %[res], [%[ptr]]"
@@ -180,8 +180,8 @@ template<> inline intptr_t OrderAccess::specialized_load_acquire<intptr_t>(volat
return result;
}
-template<> inline jdouble OrderAccess::specialized_load_acquire<jdouble>(volatile jdouble* p) {
-return jdouble_cast(specialized_load_acquire((volatile intptr_t*)p));
+template<> inline jdouble OrderAccess::specialized_load_acquire<jdouble>(const volatile jdouble* p) {
+return jdouble_cast(specialized_load_acquire((const volatile intptr_t*)p));
}

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -618,11 +618,11 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_valu
*dest = exchange_value;
return old_value;
}
-typedef jlong load_long_func_t(volatile jlong*);
+typedef jlong load_long_func_t(const volatile jlong*);
load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;
-jlong os::atomic_load_long_bootstrap(volatile jlong* src) {
+jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
// try to use the stub:
load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
jlong exchange_value,
volatile jlong *dest);
-static jlong (*atomic_load_long_func)(volatile jlong*);
+static jlong (*atomic_load_long_func)(const volatile jlong*);
static void (*atomic_store_long_func)(jlong, volatile jlong*);
@@ -63,7 +63,7 @@
static jlong atomic_cmpxchg_long_bootstrap(jlong, jlong, volatile jlong*);
-static jlong atomic_load_long_bootstrap(volatile jlong*);
+static jlong atomic_load_long_bootstrap(const volatile jlong*);
static void atomic_store_long_bootstrap(jlong, volatile jlong*);

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -46,7 +46,7 @@ inline void Atomic::store (jlong store_value, volatile jlong* dest) { *
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
//
// machine barrier instructions:

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -80,10 +80,10 @@ inline void OrderAccess::acquire() { inlasm_lwsync(); }
inline void OrderAccess::release() { inlasm_lwsync(); }
inline void OrderAccess::fence() { inlasm_sync(); }
-template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jint OrderAccess::specialized_load_acquire<jint> (volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; }
-template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte* p) { register jbyte t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jint OrderAccess::specialized_load_acquire<jint> (const volatile jint* p) { register jint t = load(p); inlasm_acquire_reg(t); return t; }
+template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const volatile jlong* p) { register jlong t = load(p); inlasm_acquire_reg(t); return t; }
#undef inlasm_sync
#undef inlasm_lwsync

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -522,6 +522,6 @@ intptr_t Atomic::cmpxchg_ptr(intptr_t xchg_val, volatile intptr_t* dest, intptr_
return (intptr_t)cmpxchg((jlong)xchg_val, (volatile jlong*)dest, (jlong)cmp_val, unused);
}
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#endif // OS_CPU_LINUX_S390_VM_ATOMIC_LINUX_S390_INLINE_HPP

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -74,10 +74,10 @@ inline void OrderAccess::acquire() { inlasm_zarch_acquire(); }
inline void OrderAccess::release() { inlasm_zarch_release(); }
inline void OrderAccess::fence() { inlasm_zarch_sync(); }
-template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (volatile jbyte* p) { register jbyte t = *p; inlasm_zarch_acquire(); return t; }
-template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(volatile jshort* p) { register jshort t = *p; inlasm_zarch_acquire(); return t; }
-template<> inline jint OrderAccess::specialized_load_acquire<jint> (volatile jint* p) { register jint t = *p; inlasm_zarch_acquire(); return t; }
-template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (volatile jlong* p) { register jlong t = *p; inlasm_zarch_acquire(); return t; }
+template<> inline jbyte OrderAccess::specialized_load_acquire<jbyte> (const volatile jbyte* p) { register jbyte t = *p; inlasm_zarch_acquire(); return t; }
+template<> inline jshort OrderAccess::specialized_load_acquire<jshort>(const volatile jshort* p) { register jshort t = *p; inlasm_zarch_acquire(); return t; }
+template<> inline jint OrderAccess::specialized_load_acquire<jint> (const volatile jint* p) { register jint t = *p; inlasm_zarch_acquire(); return t; }
+template<> inline jlong OrderAccess::specialized_load_acquire<jlong> (const volatile jlong* p) { register jlong t = *p; inlasm_zarch_acquire(); return t; }
#undef inlasm_compiler_barrier
#undef inlasm_zarch_sync

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@ inline void Atomic::dec (volatile jint* dest) { (void)add (-1, dest);
inline void Atomic::dec_ptr(volatile intptr_t* dest) { (void)add_ptr(-1, dest); }
inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest); }
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
inline jint Atomic::add (jint add_value, volatile jint* dest) {
intptr_t rv;

View File

@@ -153,7 +153,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64
@@ -181,7 +181,7 @@ inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* des
extern "C" {
// defined in linux_x86.s
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
-void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
}
inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
@@ -196,7 +196,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}
-inline jlong Atomic::load(volatile jlong* src) {
+inline jlong Atomic::load(const volatile jlong* src) {
volatile jlong dest;
_Atomic_move_long(src, &dest);
return dest;

View File

@@ -634,7 +634,7 @@ _Atomic_cmpxchg_long:
# Support for jlong Atomic::load and Atomic::store.
-# void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+# void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
.p2align 4,,15
.type _Atomic_move_long,@function
_Atomic_move_long:

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -310,7 +310,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value,
order);
}
-inline jlong Atomic::load(volatile jlong* src) {
+inline jlong Atomic::load(const volatile jlong* src) {
volatile jlong dest;
os::atomic_copy64(src, &dest);
return dest;

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -35,7 +35,7 @@
static bool register_code_area(char *low, char *high) { return true; }
// Atomically copy 64 bits of data
-static void atomic_copy64(volatile void *src, volatile void *dst) {
+static void atomic_copy64(const volatile void *src, volatile void *dst) {
#if defined(PPC32)
double tmp;
asm volatile ("lfd %0, 0(%1)\n"
@@ -49,7 +49,7 @@
: "=r"(tmp)
: "a"(src), "a"(dst));
#else
-*(jlong *) dst = *(jlong *) src;
+*(jlong *) dst = *(const jlong *) src;
#endif
}

View File

@@ -52,7 +52,7 @@ inline void Atomic::dec_ptr(volatile void* dest) { (void)add_ptr(-1, dest);
inline void Atomic::store(jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#ifdef _GNU_SOURCE

View File

@@ -115,7 +115,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)_Atomic_cmpxchg_long((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
}
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64
@@ -143,9 +143,9 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}
-extern "C" void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+extern "C" void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
-inline jlong Atomic::load(volatile jlong* src) {
+inline jlong Atomic::load(const volatile jlong* src) {
volatile jlong dest;
_Atomic_move_long(src, &dest);
return dest;

View File

@@ -107,7 +107,7 @@
.end
// Support for jlong Atomic::load and Atomic::store.
-// void _Atomic_move_long(volatile jlong* src, volatile jlong* dst)
+// void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
.inline _Atomic_move_long,2
movl 0(%esp), %eax // src
fildll (%eax)

View File

@@ -130,7 +130,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
}
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
+inline jlong Atomic::load(const volatile jlong* src) { return *src; }
#else // !AMD64
@@ -249,7 +249,7 @@ inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void*
return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
}
-inline jlong Atomic::load(volatile jlong* src) {
+inline jlong Atomic::load(const volatile jlong* src) {
volatile jlong dest;
volatile jlong* pdest = &dest;
__asm {

View File

@@ -72,7 +72,7 @@ class Atomic : AllStatic {
inline static void store_ptr(void* store_value, volatile void* dest);
// See comment above about using jlong atomics on 32-bit platforms
-inline static jlong load(volatile jlong* src);
+inline static jlong load(const volatile jlong* src);
// Atomically add to a location. Returns updated value. add*() provide:
// <fence> add-value-to-dest <membar StoreLoad|StoreStore>

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -264,20 +264,19 @@ class OrderAccess : AllStatic {
static void release();
static void fence();
-static jbyte load_acquire(volatile jbyte* p);
-static jshort load_acquire(volatile jshort* p);
-static jint load_acquire(volatile jint* p);
-static jlong load_acquire(volatile jlong* p);
-static jubyte load_acquire(volatile jubyte* p);
-static jushort load_acquire(volatile jushort* p);
-static juint load_acquire(volatile juint* p);
-static julong load_acquire(volatile julong* p);
-static jfloat load_acquire(volatile jfloat* p);
-static jdouble load_acquire(volatile jdouble* p);
+static jbyte load_acquire(const volatile jbyte* p);
+static jshort load_acquire(const volatile jshort* p);
+static jint load_acquire(const volatile jint* p);
+static jlong load_acquire(const volatile jlong* p);
+static jubyte load_acquire(const volatile jubyte* p);
+static jushort load_acquire(const volatile jushort* p);
+static juint load_acquire(const volatile juint* p);
+static julong load_acquire(const volatile julong* p);
+static jfloat load_acquire(const volatile jfloat* p);
+static jdouble load_acquire(const volatile jdouble* p);
-static intptr_t load_ptr_acquire(volatile intptr_t* p);
-static void* load_ptr_acquire(volatile void* p);
-static void* load_ptr_acquire(const volatile void* p);
+static intptr_t load_ptr_acquire(const volatile intptr_t* p);
+static void* load_ptr_acquire(const volatile void* p);
static void release_store(volatile jbyte* p, jbyte v);
static void release_store(volatile jshort* p, jshort v);
@@ -314,7 +313,7 @@ class OrderAccess : AllStatic {
static void StubRoutines_fence();
// Give platforms a variation point to specialize.
-template<typename T> static T specialized_load_acquire (volatile T* p );
+template<typename T> static T specialized_load_acquire (const volatile T* p);
template<typename T> static void specialized_release_store (volatile T* p, T v);
template<typename T> static void specialized_release_store_fence(volatile T* p, T v);
@@ -322,7 +321,7 @@ class OrderAccess : AllStatic {
static void ordered_store(volatile FieldType* p, FieldType v);
template<typename FieldType, ScopedFenceType FenceType>
-static FieldType ordered_load(volatile FieldType* p);
+static FieldType ordered_load(const volatile FieldType* p);
static void store(volatile jbyte* p, jbyte v);
static void store(volatile jshort* p, jshort v);
@@ -331,12 +330,12 @@ class OrderAccess : AllStatic {
static void store(volatile jdouble* p, jdouble v);
static void store(volatile jfloat* p, jfloat v);
-static jbyte load (volatile jbyte* p);
-static jshort load (volatile jshort* p);
-static jint load (volatile jint* p);
-static jlong load (volatile jlong* p);
-static jdouble load (volatile jdouble* p);
-static jfloat load (volatile jfloat* p);
+static jbyte load(const volatile jbyte* p);
+static jshort load(const volatile jshort* p);
+static jint load(const volatile jint* p);
+static jlong load(const volatile jlong* p);
+static jdouble load(const volatile jdouble* p);
+static jfloat load(const volatile jfloat* p);
// The following store_fence methods are deprecated and will be removed
// when all repos conform to the new generalized OrderAccess.

View File

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -47,25 +47,24 @@ inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
}
template <typename FieldType, ScopedFenceType FenceType>
-inline FieldType OrderAccess::ordered_load(volatile FieldType* p) {
+inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
ScopedFence<FenceType> f((void*)p);
return load(p);
}
-inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return specialized_load_acquire(p); }
-inline jshort OrderAccess::load_acquire(volatile jshort* p) { return specialized_load_acquire(p); }
-inline jint OrderAccess::load_acquire(volatile jint* p) { return specialized_load_acquire(p); }
-inline jlong OrderAccess::load_acquire(volatile jlong* p) { return specialized_load_acquire(p); }
-inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return specialized_load_acquire(p); }
-inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return specialized_load_acquire(p); }
-inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return (jubyte) specialized_load_acquire((volatile jbyte*)p); }
-inline jushort OrderAccess::load_acquire(volatile jushort* p) { return (jushort)specialized_load_acquire((volatile jshort*)p); }
-inline juint OrderAccess::load_acquire(volatile juint* p) { return (juint) specialized_load_acquire((volatile jint*)p); }
-inline julong OrderAccess::load_acquire(volatile julong* p) { return (julong) specialized_load_acquire((volatile jlong*)p); }
+inline jbyte OrderAccess::load_acquire(const volatile jbyte* p) { return specialized_load_acquire(p); }
+inline jshort OrderAccess::load_acquire(const volatile jshort* p) { return specialized_load_acquire(p); }
+inline jint OrderAccess::load_acquire(const volatile jint* p) { return specialized_load_acquire(p); }
+inline jlong OrderAccess::load_acquire(const volatile jlong* p) { return specialized_load_acquire(p); }
+inline jfloat OrderAccess::load_acquire(const volatile jfloat* p) { return specialized_load_acquire(p); }
+inline jdouble OrderAccess::load_acquire(const volatile jdouble* p) { return specialized_load_acquire(p); }
+inline jubyte OrderAccess::load_acquire(const volatile jubyte* p) { return (jubyte) specialized_load_acquire((const volatile jbyte*)p); }
+inline jushort OrderAccess::load_acquire(const volatile jushort* p) { return (jushort)specialized_load_acquire((const volatile jshort*)p); }
+inline juint OrderAccess::load_acquire(const volatile juint* p) { return (juint) specialized_load_acquire((const volatile jint*)p); }
+inline julong OrderAccess::load_acquire(const volatile julong* p) { return (julong) specialized_load_acquire((const volatile jlong*)p); }
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return (intptr_t)specialized_load_acquire(p); }
-inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return (void*)specialized_load_acquire((volatile intptr_t*)p); }
-inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)specialized_load_acquire((volatile intptr_t*)p); }
+inline intptr_t OrderAccess::load_ptr_acquire(const volatile intptr_t* p) { return (intptr_t)specialized_load_acquire(p); }
+inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)specialized_load_acquire((const volatile intptr_t*)p); }
inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { specialized_release_store(p, v); }
inline void OrderAccess::release_store(volatile jshort* p, jshort v) { specialized_release_store(p, v); }
@@ -98,7 +97,7 @@ inline void OrderAccess::release_store_ptr_fence(volatile void* p, void*
// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
-template<typename T> inline T OrderAccess::specialized_load_acquire (volatile T* p) { return ordered_load<T, X_ACQUIRE>(p); }
+template<typename T> inline T OrderAccess::specialized_load_acquire (const volatile T* p) { return ordered_load<T, X_ACQUIRE>(p); }
template<typename T> inline void OrderAccess::specialized_release_store (volatile T* p, T v) { ordered_store<T, RELEASE_X>(p, v); }
template<typename T> inline void OrderAccess::specialized_release_store_fence(volatile T* p, T v) { ordered_store<T, RELEASE_X_FENCE>(p, v); }
@@ -111,12 +110,12 @@ inline void OrderAccess::store(volatile jlong* p, jlong v) { Atomic::store(v
inline void OrderAccess::store(volatile jdouble* p, jdouble v) { Atomic::store(jlong_cast(v), (volatile jlong*)p); }
inline void OrderAccess::store(volatile jfloat* p, jfloat v) { *p = v; }
-inline jbyte OrderAccess::load(volatile jbyte* p) { return *p; }
-inline jshort OrderAccess::load(volatile jshort* p) { return *p; }
-inline jint OrderAccess::load(volatile jint* p) { return *p; }
-inline jlong OrderAccess::load(volatile jlong* p) { return Atomic::load(p); }
-inline jdouble OrderAccess::load(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
-inline jfloat OrderAccess::load(volatile jfloat* p) { return *p; }
+inline jbyte OrderAccess::load(const volatile jbyte* p) { return *p; }
+inline jshort OrderAccess::load(const volatile jshort* p) { return *p; }
+inline jint OrderAccess::load(const volatile jint* p) { return *p; }
+inline jlong OrderAccess::load(const volatile jlong* p) { return Atomic::load(p); }
+inline jdouble OrderAccess::load(const volatile jdouble* p) { return jdouble_cast(Atomic::load((const volatile jlong*)p)); }
+inline jfloat OrderAccess::load(const volatile jfloat* p) { return *p; }
#endif // VM_HAS_GENERALIZED_ORDER_ACCESS