atomic: Use native <atomic> when compiling as C++ with MSVC
lhmouse committed Jan 18, 2024
1 parent f356b8e commit b4aa27c
Showing 1 changed file with 90 additions and 82 deletions.

mcfgthread/atomic.h
@@ -7,70 +7,78 @@

#include "fwd.h"

#if defined __GNUC__ || defined __clang__ || defined __STDC_NO_ATOMICS__
/* Use native types. */
# define __MCF_atomic(...) volatile __VA_ARGS__

# define __MCF_memory_order_rlx __ATOMIC_RELAXED
# define __MCF_memory_order_acq __ATOMIC_ACQUIRE
# define __MCF_memory_order_rel __ATOMIC_RELEASE
# define __MCF_memory_order_arl __ATOMIC_ACQ_REL
# define __MCF_memory_order_cst __ATOMIC_SEQ_CST

# define __MCF_atomic_load(p,o) __atomic_load_n(p,o)
# define __MCF_atomic_store(p,v,o) __atomic_store_n(p,v,o)
# define __MCF_atomic_xchg(p,v,o) __atomic_exchange_n(p,v,o)
# define __MCF_atomic_cmpxchg(p,c,v,o,f) __atomic_compare_exchange_n(p,c,v,0,o,f)
# define __MCF_atomic_cmpxchgw(p,c,v,o,f) __atomic_compare_exchange_n(p,c,v,1,o,f)
# define __MCF_atomic_xadd(p,v,o) __atomic_fetch_add(p,v,o)
# define __MCF_atomic_xsub(p,v,o) __atomic_fetch_sub(p,v,o)
#endif

#if !defined __MCF_atomic && defined __cplusplus && (__cplusplus >= 201103L)
/* Use the C++11 standard library. */
# include <atomic>
# define __MCF_atomic(...) ::std::atomic<__VA_ARGS__>

# define __MCF_memory_order_rlx ::std::memory_order_relaxed
# define __MCF_memory_order_acq ::std::memory_order_acquire
# define __MCF_memory_order_rel ::std::memory_order_release
# define __MCF_memory_order_arl ::std::memory_order_acq_rel
# define __MCF_memory_order_cst ::std::memory_order_seq_cst

# define __MCF_atomic_load(p,o) (p)->load(o)
# define __MCF_atomic_store(p,v,o) (p)->store(v,o)
# define __MCF_atomic_xchg(p,v,o) (p)->exchange(v,o)
# define __MCF_atomic_cmpxchg(p,c,v,o,f) (p)->compare_exchange_strong(*(c),v,o,f)
# define __MCF_atomic_cmpxchgw(p,c,v,o,f) (p)->compare_exchange_weak(*(c),v,o,f)
# define __MCF_atomic_xadd(p,v,o) (p)->fetch_add(v,o)
# define __MCF_atomic_xsub(p,v,o) (p)->fetch_sub(v,o)
#endif

#if !defined __MCF_atomic
/* Use the C11 standard library. Microsoft Visual Studio 2022 has experimental
* support for this, but it seems to suffice. */
# include <stdatomic.h>
# define __MCF_atomic(...) _Atomic(__VA_ARGS__)

# define __MCF_memory_order_rlx memory_order_relaxed
# define __MCF_memory_order_acq memory_order_acquire
# define __MCF_memory_order_rel memory_order_release
# define __MCF_memory_order_arl memory_order_acq_rel
# define __MCF_memory_order_cst memory_order_seq_cst

# define __MCF_atomic_load(p,o) atomic_load_explicit(p,o)
# define __MCF_atomic_store(p,v,o) atomic_store_explicit(p,v,o)
# define __MCF_atomic_xchg(p,v,o) atomic_exchange_explicit(p,v,o)
# define __MCF_atomic_cmpxchg(p,c,v,o,f) atomic_compare_exchange_strong_explicit(p,c,v,o,f)
# define __MCF_atomic_cmpxchgw(p,c,v,o,f) atomic_compare_exchange_weak_explicit(p,c,v,o,f)
# define __MCF_atomic_xadd(p,v,o) atomic_fetch_add_explicit(p,v,o)
# define __MCF_atomic_xsub(p,v,o) atomic_fetch_sub_explicit(p,v,o)
#endif
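
/* Illustrative sketch, not part of this header: whichever branch above is
 * taken, the `__MCF_atomic` macros present one backend-neutral surface. The
 * names `my_counter` and `my_read_counter` below are hypothetical.  */
static __MCF_atomic(int) my_counter;

int
my_read_counter(void)
  {
    /* Expands to `__atomic_load_n`, `std::atomic<int>::load` or
     * `atomic_load_explicit`, depending on the branch selected above.  */
    return __MCF_atomic_load(&my_counter, __MCF_memory_order_acq);
  }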

__MCF_C_DECLARATIONS_BEGIN
#ifndef __MCF_ATOMIC_IMPORT
# define __MCF_ATOMIC_IMPORT
# define __MCF_ATOMIC_INLINE __MCF_GNU_INLINE
#endif

#ifdef __GNUC__
/* Use GCC and Clang builtins, which map to C++11 atomic functions precisely.
* GCC 4.7 or Clang 3.1 is required. */
# define __MCF__Atomic

# define __MCF_atomic_succ_rlx __ATOMIC_RELAXED
# define __MCF_atomic_succ_acq __ATOMIC_ACQUIRE
# define __MCF_atomic_succ_rel __ATOMIC_RELEASE
# define __MCF_atomic_succ_arl __ATOMIC_ACQ_REL
# define __MCF_atomic_succ_cst __ATOMIC_SEQ_CST

# define __MCF_atomic_fail_rlx __ATOMIC_RELAXED
# define __MCF_atomic_fail_acq __ATOMIC_ACQUIRE
# define __MCF_atomic_fail_rel __ATOMIC_RELAXED
# define __MCF_atomic_fail_arl __ATOMIC_ACQUIRE
# define __MCF_atomic_fail_cst __ATOMIC_SEQ_CST

# define __MCF_atomic_load(p, ord) __atomic_load_n(p, ord)
# define __MCF_atomic_store(p, val, ord) __atomic_store_n(p, val, ord)
# define __MCF_atomic_xchg(p, val, ord) __atomic_exchange_n(p, val, ord)
# define __MCF_atomic_xadd(p, val, ord) __atomic_fetch_add(p, val, ord)
# define __MCF_atomic_xsub(p, val, ord) __atomic_fetch_sub(p, val, ord)

# define __MCF_atomic_cmpxchg(p, pcmp, val, ord_succ, ord_fail) \
__atomic_compare_exchange_n(p, pcmp, val, 0, ord_succ, ord_fail)

# define __MCF_atomic_cmpxchg_weak(p, pcmp, val, ord_succ, ord_fail) \
__atomic_compare_exchange_n(p, pcmp, val, 1, ord_succ, ord_fail)
#else
/* Use C11 standard macros, provided by the system C library. In the case of
* MSVC, Visual Studio 2022 is required. */
# include <stdatomic.h>
# define __MCF__Atomic __MCF_C11(_Atomic)

# define __MCF_atomic_succ_rlx memory_order_relaxed
# define __MCF_atomic_succ_acq memory_order_acquire
# define __MCF_atomic_succ_rel memory_order_release
# define __MCF_atomic_succ_arl memory_order_acq_rel
# define __MCF_atomic_succ_cst memory_order_seq_cst

# define __MCF_atomic_fail_rlx memory_order_relaxed
# define __MCF_atomic_fail_acq memory_order_acquire
# define __MCF_atomic_fail_rel memory_order_relaxed
# define __MCF_atomic_fail_arl memory_order_acquire
# define __MCF_atomic_fail_cst memory_order_seq_cst

# define __MCF_atomic_load(p, ord) atomic_load_explicit(p, ord)
# define __MCF_atomic_store(p, val, ord) atomic_store_explicit(p, val, ord)
# define __MCF_atomic_xchg(p, val, ord) atomic_exchange_explicit(p, val, ord)
# define __MCF_atomic_xadd(p, val, ord) atomic_fetch_add_explicit(p, val, ord)
# define __MCF_atomic_xsub(p, val, ord) atomic_fetch_sub_explicit(p, val, ord)

# define __MCF_atomic_cmpxchg(p, pcmp, val, ord_succ, ord_fail) \
atomic_compare_exchange_strong_explicit(p, pcmp, val, ord_succ, ord_fail)

# define __MCF_atomic_cmpxchg_weak(p, pcmp, val, ord_succ, ord_fail) \
atomic_compare_exchange_weak_explicit(p, pcmp, val, ord_succ, ord_fail)
#endif
/* Define memory orders for failed compare-and-swap operations. */
#define __MCF_memory_order_f_rlx __MCF_memory_order_rlx
#define __MCF_memory_order_f_acq __MCF_memory_order_acq
#define __MCF_memory_order_f_rel __MCF_memory_order_rlx
#define __MCF_memory_order_f_arl __MCF_memory_order_acq
#define __MCF_memory_order_f_cst __MCF_memory_order_cst
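
/* Illustrative sketch, not part of this header: a failed compare-and-swap may
 * not have release semantics, which is why `rel` falls back to `rlx` and `arl`
 * falls back to `acq` above. `my_state` and `my_try_lock` are hypothetical.  */
static __MCF_atomic(int) my_state;

bool
my_try_lock(void)
  {
    int cmp = 0;
    /* Acquire-release on success; acquire only on failure.  */
    return __MCF_atomic_cmpxchg(&my_state, &cmp, 1,
               __MCF_memory_order_arl, __MCF_memory_order_f_arl);
  }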

/* Perform an atomic load operation. `mem` shall point to an atomic object of the
* specified width. The first function returns the value as an integer. The second
@@ -88,17 +96,17 @@ __MCF_C_DECLARATIONS_BEGIN
_MCF_atomic_load_##WIDTH##_##ORDER(__MCF_zp_r __mem) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_load((const __MCF__Atomic INTEGER*) __mem, \
__MCF_atomic_succ_##ORDER); \
return __MCF_atomic_load((const __MCF_atomic(INTEGER)*) __mem, \
__MCF_memory_order_##ORDER); \
} \
\
__MCF_ALWAYS_INLINE \
void \
_MCF_atomic_load_p##WIDTH##_##ORDER(__MCF_zp __res, __MCF_zp_r __mem) \
__MCF_NOEXCEPT \
{ \
*(INTEGER*) __res = __MCF_atomic_load((const __MCF__Atomic INTEGER*) __mem, \
__MCF_atomic_succ_##ORDER); \
*(INTEGER*) __res = __MCF_atomic_load((const __MCF_atomic(INTEGER)*) __mem, \
__MCF_memory_order_##ORDER); \
}
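
/* Illustrative sketch, not part of this header: `_MCF_atomic_load_8_rlx` is
 * generated by the instantiations below; `my_get_flag` is hypothetical.  */
int8_t
my_get_flag(const void* mem)
  {
    return _MCF_atomic_load_8_rlx(mem);
  }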

__MCF_atomic_load_(8, rlx, int8_t)
@@ -141,17 +149,17 @@ __MCF_atomic_load_(z, cst, size_t)
_MCF_atomic_store_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) \
__MCF_NOEXCEPT \
{ \
__MCF_atomic_store((__MCF__Atomic INTEGER*) __mem, __val, \
__MCF_atomic_succ_##ORDER); \
__MCF_atomic_store((__MCF_atomic(INTEGER)*) __mem, __val, \
__MCF_memory_order_##ORDER); \
} \
\
__MCF_ALWAYS_INLINE \
void \
_MCF_atomic_store_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp_r __src) \
__MCF_NOEXCEPT \
{ \
__MCF_atomic_store((__MCF__Atomic INTEGER*) __mem, *(const INTEGER*) __src, \
__MCF_atomic_succ_##ORDER); \
__MCF_atomic_store((__MCF_atomic(INTEGER)*) __mem, *(const INTEGER*) __src, \
__MCF_memory_order_##ORDER); \
}
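
/* Illustrative sketch, not part of this header: `_MCF_atomic_store_8_rlx` is
 * generated by the instantiations below; `my_set_flag` is hypothetical.  */
void
my_set_flag(void* mem)
  {
    _MCF_atomic_store_8_rlx(mem, 1);
  }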

__MCF_atomic_store_(8, rlx, int8_t)
@@ -195,17 +203,17 @@ __MCF_atomic_store_(z, cst, size_t)
_MCF_atomic_xchg_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_xchg((__MCF__Atomic INTEGER*) __mem, __val, \
__MCF_atomic_succ_##ORDER); \
return __MCF_atomic_xchg((__MCF_atomic(INTEGER)*) __mem, __val, \
__MCF_memory_order_##ORDER); \
} \
\
__MCF_ALWAYS_INLINE \
void \
_MCF_atomic_xchg_p##WIDTH##_##ORDER(__MCF_zp __res, __MCF_zp __mem, __MCF_zp_r __src) \
__MCF_NOEXCEPT \
{ \
*(INTEGER*) __res = __MCF_atomic_xchg((__MCF__Atomic INTEGER*) __mem, \
*(const INTEGER*) __src, __MCF_atomic_succ_##ORDER); \
*(INTEGER*) __res = __MCF_atomic_xchg((__MCF_atomic(INTEGER)*) __mem, \
*(const INTEGER*) __src, __MCF_memory_order_##ORDER); \
}
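
/* Illustrative sketch, not part of this header: the exchange functions make a
 * one-shot test-and-set trivial. `my_test_and_set` is hypothetical.  */
int8_t
my_test_and_set(void* mem)
  {
    /* Set the byte to 1 and return whatever it held before.  */
    return _MCF_atomic_xchg_8_rlx(mem, 1);
  }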

__MCF_atomic_xchg_(8, rlx, int8_t)
@@ -263,17 +271,17 @@ __MCF_atomic_xchg_(z, cst, size_t)
_MCF_atomic_cmpxchg_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER* __restrict __cmp, INTEGER __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_cmpxchg((__MCF__Atomic INTEGER*) __mem, __cmp, __val, \
__MCF_atomic_succ_##ORDER, __MCF_atomic_fail_##ORDER); \
return __MCF_atomic_cmpxchg((__MCF_atomic(INTEGER)*) __mem, __cmp, __val, \
__MCF_memory_order_##ORDER, __MCF_memory_order_f_##ORDER); \
} \
\
__MCF_ALWAYS_INLINE \
bool \
_MCF_atomic_cmpxchg_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp __cmp, __MCF_zp __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_cmpxchg_weak((__MCF__Atomic INTEGER*) __mem, (INTEGER*) __cmp, \
*(const INTEGER*) __val, __MCF_atomic_succ_##ORDER, __MCF_atomic_fail_##ORDER); \
return __MCF_atomic_cmpxchg((__MCF_atomic(INTEGER)*) __mem, (INTEGER*) __cmp, \
*(const INTEGER*) __val, __MCF_memory_order_##ORDER, __MCF_memory_order_f_##ORDER); \
}
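
/* Illustrative sketch, not part of this header: on failure the strong form
 * stores the observed value back into `*cmp`. `my_claim` is hypothetical.  */
bool
my_claim(void* mem)
  {
    int8_t cmp = 0;
    /* Succeeds only if the byte was zero.  */
    return _MCF_atomic_cmpxchg_8_rlx(mem, &cmp, 1);
  }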

__MCF_atomic_cmpxchg_(8, rlx, int8_t)
@@ -331,17 +339,17 @@ __MCF_atomic_cmpxchg_(z, cst, size_t)
_MCF_atomic_cmpxchg_weak_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER* __restrict __cmp, INTEGER __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_cmpxchg_weak((__MCF__Atomic INTEGER*) __mem, __cmp, __val, \
__MCF_atomic_succ_##ORDER, __MCF_atomic_fail_##ORDER); \
return __MCF_atomic_cmpxchgw((__MCF_atomic(INTEGER)*) __mem, __cmp, __val, \
__MCF_memory_order_##ORDER, __MCF_memory_order_f_##ORDER); \
} \
\
__MCF_ALWAYS_INLINE \
bool \
_MCF_atomic_cmpxchg_weak_p##WIDTH##_##ORDER(__MCF_zp __mem, __MCF_zp __cmp, __MCF_zp __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_cmpxchg_weak((__MCF__Atomic INTEGER*) __mem, (INTEGER*) __cmp, \
*(const INTEGER*) __val, __MCF_atomic_succ_##ORDER, __MCF_atomic_fail_##ORDER); \
return __MCF_atomic_cmpxchgw((__MCF_atomic(INTEGER)*) __mem, (INTEGER*) __cmp, \
*(const INTEGER*) __val, __MCF_memory_order_##ORDER, __MCF_memory_order_f_##ORDER); \
}
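
/* Illustrative sketch, not part of this header: the weak form may fail
 * spuriously, so it belongs in a retry loop. `my_set_bits` is hypothetical.  */
void
my_set_bits(void* mem, int8_t mask)
  {
    int8_t old = _MCF_atomic_load_8_rlx(mem);
    /* On failure `old` is refreshed with the observed value, so the loop
     * recomputes the desired value and tries again.  */
    while(!_MCF_atomic_cmpxchg_weak_8_rlx(mem, &old, (int8_t) (old | mask)))
      ;
  }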

__MCF_atomic_cmpxchg_weak_(8, rlx, int8_t)
@@ -392,7 +400,7 @@ __MCF_atomic_cmpxchg_weak_(z, cst, size_t)
_MCF_atomic_xadd_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_xadd((__MCF__Atomic INTEGER*) __mem, __val, __MCF_atomic_succ_##ORDER); \
return __MCF_atomic_xadd((__MCF_atomic(INTEGER)*) __mem, __val, __MCF_memory_order_##ORDER); \
}
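
/* Illustrative sketch, not part of this header: `_MCF_atomic_xadd_8_rlx`
 * returns the value before the addition. `my_increment` is hypothetical.  */
int8_t
my_increment(void* mem)
  {
    return _MCF_atomic_xadd_8_rlx(mem, 1);
  }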

__MCF_atomic_xadd_(8, rlx, int8_t)
@@ -431,8 +439,8 @@ __MCF_atomic_xadd_(z, rel, size_t)
__MCF_atomic_xadd_(z, arl, size_t)
__MCF_atomic_xadd_(z, cst, size_t)

/* Subtract `val` from an atomic object. `mem` shall point to an atomic object of
* the specified width. These functions return the old value.
/* Subtract `val` from an atomic object. `mem` shall point to an atomic object
* of the specified width. These functions return the old value.
*
* [INTEGER]
* _MCF_atomic_xsub_[WIDTH]_[ORDER](void* mem, [INTEGER] val);
Expand All @@ -443,7 +451,7 @@ __MCF_atomic_xadd_(z, cst, size_t)
_MCF_atomic_xsub_##WIDTH##_##ORDER(__MCF_zp __mem, INTEGER __val) \
__MCF_NOEXCEPT \
{ \
return __MCF_atomic_xsub((__MCF__Atomic INTEGER*) __mem, __val, __MCF_atomic_succ_##ORDER); \
return __MCF_atomic_xsub((__MCF_atomic(INTEGER)*) __mem, __val, __MCF_memory_order_##ORDER); \
}
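
/* Illustrative sketch, not part of this header: `_MCF_atomic_xsub_8_rlx`
 * returns the value before the subtraction. `my_decrement` is hypothetical.  */
int8_t
my_decrement(void* mem)
  {
    return _MCF_atomic_xsub_8_rlx(mem, 1);
  }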

__MCF_atomic_xsub_(8, rlx, int8_t)
