Add support for Windows ARM builds
parent 29fb87e1d5
commit ec021f5429
16 changed files with 95 additions and 94 deletions
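Background for the diff below: on Windows ARM targets, WinNT.h supplies MemoryBarrier as a function-like macro rather than as a function, so protobuf's own "void MemoryBarrier();" declaration gets mangled by the preprocessor before the compiler ever sees it. Renaming the internal function to MemoryBarrierInternal sidesteps the macro while still letting the implementation call it. A minimal, self-contained sketch of the pattern (the #define below simulates the ARM SDK macro with a portable fence; the real expansion differs):

#include <atomic>

// Simulated Windows-ARM environment: MemoryBarrier is a function-like
// macro, so a declaration spelled "void MemoryBarrier();" would be
// macro-expanded into ill-formed code.
#define MemoryBarrier() std::atomic_thread_fence(std::memory_order_seq_cst)

// The renamed function does not collide with the macro, and its body can
// still invoke MemoryBarrier() -- here the macro expands in place.
inline void MemoryBarrierInternal() {
  MemoryBarrier();
}

int main() {
  MemoryBarrierInternal();  // emits a sequentially consistent fence
  return 0;
}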
@@ -123,8 +123,8 @@ Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
 // ensure that no later memory access can be reordered ahead of the operation.
 // "Release" operations ensure that no previous memory access can be reordered
 // after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
+// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
+// memory access.
 Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                 Atomic32 old_value,
                                 Atomic32 new_value);
@@ -132,10 +132,10 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                 Atomic32 old_value,
                                 Atomic32 new_value);

-#if defined(__MINGW32__) && defined(MemoryBarrier)
-#undef MemoryBarrier
-#endif
-void MemoryBarrier();
+// This function was renamed from MemoryBarrier to MemoryBarrierInternal
+// because MemoryBarrier is a define in Windows ARM builds and we do not
+// undefine it because we call it from this function.
+void MemoryBarrierInternal();
 void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
 void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
 void Release_Store(volatile Atomic32* ptr, Atomic32 value);
@@ -180,7 +180,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #include <google/protobuf/stubs/atomicops_internals_tsan.h>
 // MSVC.
 #elif defined(_MSC_VER)
-#if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64)
+#if defined(GOOGLE_PROTOBUF_ARCH_IA32) || defined(GOOGLE_PROTOBUF_ARCH_X64) || defined(GOOGLE_PROTOBUF_ARCH_ARM)
 #include <google/protobuf/stubs/atomicops_internals_x86_msvc.h>
 #else
 #error GOOGLE_PROTOBUF_ATOMICOPS_ERROR
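With the dispatch change above, ARM MSVC builds now reuse atomicops_internals_x86_msvc.h. This works because that implementation is written against the Interlocked* Win32 APIs, which MSVC exposes as intrinsics on every Windows architecture, not only x86. A hedged sketch of the kind of primitive that header builds on (CompareAndSwap is an illustrative wrapper name, not protobuf's API):

#include <windows.h>

// InterlockedCompareExchange is a full-barrier compare-and-swap available
// on all Windows targets (x86, x64, ARM), which is what makes a shared
// MSVC implementation workable.
inline LONG CompareAndSwap(volatile LONG* ptr, LONG old_value, LONG new_value) {
  // Returns the value observed at *ptr before the operation; the swap
  // happened iff the return value equals old_value.
  return InterlockedCompareExchange(ptr, new_value, old_value);
}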
@@ -37,7 +37,7 @@ namespace google {
 namespace protobuf {
 namespace internal {

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
 }

@@ -117,9 +117,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,

 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return result;
 }
@@ -128,7 +128,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return prev;
 }
@@ -136,7 +136,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

   return prev;
@@ -148,7 +148,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -178,7 +178,7 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -253,9 +253,9 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,

 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return result;
 }
@@ -264,7 +264,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return prev;
 }
@@ -272,7 +272,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

   return prev;
@@ -284,7 +284,7 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
@@ -314,7 +314,7 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -115,17 +115,17 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   pLinuxKernelMemoryBarrier();
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -135,12 +135,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {

 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -110,17 +110,17 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __sync_synchronize();
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -130,12 +130,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {

 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -52,7 +52,7 @@ typedef volatile std::atomic<Atomic32>* AtomicLocation32;
 static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
               "incompatible 32-bit atomic layout");

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
 #if defined(__GLIBCXX__)
   // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
   // not defined, leading to the linker complaining about undefined references.
@@ -119,7 +119,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -135,7 +135,7 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
 }

@@ -202,7 +202,7 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
@@ -218,7 +218,7 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
 }

@@ -78,7 +78,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
 }

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __sync_synchronize();
 }

@@ -73,7 +73,7 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
   return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   OSMemoryBarrier();
 }

@@ -103,11 +103,11 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -117,12 +117,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {

 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -193,11 +193,11 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -207,12 +207,12 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {

 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -125,8 +125,8 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
 // ensure that no later memory access can be reordered ahead of the operation.
 // "Release" operations ensure that no previous memory access can be reordered
 // after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
+// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
+// memory access.
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
@@ -149,17 +149,17 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __asm__ __volatile__("sync" : : : "memory");
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -169,12 +169,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {

 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -247,9 +247,9 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,

 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return res;
 }

@@ -257,20 +257,20 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
 // ensure that no later memory access can be reordered ahead of the operation.
 // "Release" operations ensure that no previous memory access can be reordered
 // after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
+// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
+// memory access.
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return res;
 }

 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }

@@ -280,11 +280,11 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -294,12 +294,12 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {

 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }
 #endif

@@ -93,7 +93,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
   return result;
 }

-inline void MemoryBarrier(void) {
+inline void MemoryBarrierInternal(void) {
   asm volatile (
       " lwsync \n\t"
       " isync \n\t"

@@ -97,22 +97,22 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,

 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                         Atomic32 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return res;
 }

 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value, Atomic32 new_value) {
   Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return res;
 }

 inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value, Atomic32 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   return res;
 }
@@ -121,15 +121,15 @@ inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
   *ptr = value;
 }

-inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); }
+inline void MemoryBarrierInternal() { __asm__ __volatile__("sync" : : : "memory"); }

 inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }

@@ -137,12 +137,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) { return *ptr; }

 inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -54,16 +54,16 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
   return (Atomic32)atomic_add_32_nv((volatile uint32_t*)ptr, (uint32_t)increment);
 }

-inline void MemoryBarrier(void) {
+inline void MemoryBarrierInternal(void) {
   membar_producer();
   membar_consumer();
 }

 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic32 ret = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return ret;
 }
@@ -72,7 +72,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();

   return ret;
 }
@@ -80,7 +80,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }

@@ -129,9 +129,9 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {
 }

 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 ret = atomic_add_64_nv((volatile uint64_t*)ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return ret;
 }

@@ -139,14 +139,14 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 ret = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return ret;
 }

 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }

@@ -206,7 +206,7 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
   return cmp;
 }

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }

@@ -119,18 +119,18 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {

 // 64-bit implementations of memory barrier can be simpler, because it
 // "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __asm__ __volatile__("mfence" : : : "memory");
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 #else

-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
     __asm__ __volatile__("mfence" : : : "memory");
   } else {  // mfence is faster but not present on PIII
@@ -168,7 +168,7 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -225,7 +225,7 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
@@ -262,7 +262,7 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -44,9 +44,10 @@ namespace google {
 namespace protobuf {
 namespace internal {

-inline void MemoryBarrier() {
-  // We use MemoryBarrier from WinNT.h
-  ::MemoryBarrier();
+inline void MemoryBarrierInternal() {
+  // On ARM this is a define while on x86/x64 this is
+  // a function declared in WinNT.h
+  MemoryBarrier();
 }

 Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,

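Note the body change above as well as the rename: the old wrapper was itself named MemoryBarrier, so it had to call ::MemoryBarrier() to reach the WinNT.h function rather than recurse into itself, and on ARM that qualified spelling would also be rewritten by the macro. With the distinct name there is no self-shadowing, so an unqualified MemoryBarrier() works on every target: on ARM the preprocessor expands the macro, and on x86/x64 name lookup finds the WinNT.h function. A simplified, self-contained sketch (the stub below stands in for the real SDK symbol):

#include <cstdio>

// Stand-in for the x86/x64 WinNT.h function (hypothetical stub).
void MemoryBarrier() { std::puts("global fence"); }

namespace internal {

// With a distinct name there is no shadowing, so the unqualified call
// resolves to the global function -- or, on ARM, the macro would expand
// here before name lookup even runs.
inline void MemoryBarrierInternal() {
  MemoryBarrier();
}

}  // namespace internal

int main() {
  internal::MemoryBarrierInternal();
  return 0;
}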
@@ -82,7 +82,7 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -125,7 +125,7 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }

@@ -47,7 +47,7 @@
 #elif defined(__QNX__)
 #define GOOGLE_PROTOBUF_ARCH_ARM_QNX 1
 #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
-#elif defined(__ARMEL__)
+#elif defined(_M_ARM) || defined(__ARMEL__)
 #define GOOGLE_PROTOBUF_ARCH_ARM 1
 #define GOOGLE_PROTOBUF_ARCH_32_BIT 1
 #elif defined(__aarch64__)
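For completeness, _M_ARM is the macro MSVC predefines when targeting 32-bit ARM, mirroring __ARMEL__ on GCC/Clang for little-endian ARM, so the same branch now fires for both toolchains. A quick check of the resulting macro (assuming a protobuf source tree on the include path):

#include <google/protobuf/stubs/platform_macros.h>
#include <cstdio>

int main() {
#if defined(GOOGLE_PROTOBUF_ARCH_ARM)
  std::printf("building for 32-bit ARM\n");
#else
  std::printf("not a 32-bit ARM target\n");
#endif
  return 0;
}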