diff --git a/octa/atomic.h b/octa/atomic.h
new file mode 100644
index 0000000..ab906d3
--- /dev/null
+++ b/octa/atomic.h
@@ -0,0 +1,1300 @@
+/* Atomics for OctaSTD. Supports GCC/Clang and possibly MSVC.
+ *
+ * This file is part of OctaSTD. See COPYING.md for further information.
+ */
+
+#ifndef OCTA_ATOMIC_H
+#define OCTA_ATOMIC_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "octa/types.h"
+#include "octa/type_traits.h"
+
+namespace octa {
+    typedef enum MemoryOrder {
+        memory_order_relaxed = 0,
+        memory_order_consume,
+        memory_order_acquire,
+        memory_order_release,
+        memory_order_acq_rel,
+        memory_order_seq_cst
+    } MemoryOrder;
+
+    template<typename T>
+    struct __OctaAtomicBase {
+        __OctaAtomicBase() noexcept {}
+        explicit __OctaAtomicBase(T v) noexcept: value(v) {}
+        T value;
+    };
+
+    template<typename T> T __octa_atomic_create();
+
+    /* SFINAE test: can a value of type U be assigned through the "value"
+     * member of the (possibly volatile) atomic base pointed to by T? */
+    template<typename T, typename U>
+    EnableIf<sizeof(__octa_atomic_create<T>()->value = __octa_atomic_create<U>()),
+             char>
+    __octa_test_atomic_assignable(int);
+
+    template<typename T, typename U>
+    int __octa_test_atomic_assignable(...);
+
+    template<typename T, typename U>
+    struct __OctaCanAtomicAssign {
+        static constexpr bool value
+            = (sizeof(__octa_test_atomic_assignable<T, U>(1)) == sizeof(char));
+    };
+
+    template<typename T>
+    static inline EnableIf<
+        __OctaCanAtomicAssign<volatile __OctaAtomicBase<T> *, T>::value
+    > __octa_atomic_init(volatile __OctaAtomicBase<T> *a, T v) {
+        a->value = v;
+    }
+
+    template<typename T>
+    static inline EnableIf<
+        !__OctaCanAtomicAssign<volatile __OctaAtomicBase<T> *, T>::value &&
+         __OctaCanAtomicAssign<         __OctaAtomicBase<T> *, T>::value
+    > __octa_atomic_init(volatile __OctaAtomicBase<T> *a, T v) {
+        volatile char *to = (volatile char *)(&a->value);
+        volatile char *end = to + sizeof(T);
+        char *from = (char *)(&v);
+        while (to != end) *to++ = *from++;
+    }
+
+    template<typename T>
+    static inline void __octa_atomic_init(__OctaAtomicBase<T> *a, T v) {
+        a->value = v;
+    }
+
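+    /* Illustrative sketch (not part of the API): for an ordinary scalar
+     * the assignable overload above is selected and initialization is a
+     * plain volatile assignment; a type without a usable volatile copy
+     * assignment falls back to the byte-copy loop instead.
+     *
+     *     __OctaAtomicBase<int> b;
+     *     __octa_atomic_init(&b, 42); // takes the a->value = v path
+     */
+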
+    /* MSVC support
+     *
+     * untested, might not work
+     *
+     * reference: https://github.com/openvswitch/ovs/blob/master/lib/ovs-atomic-msvc.h
+     */
+
+#ifdef _MSC_VER
+
+#define ATOMIC_BOOL_LOCK_FREE 2
+#define ATOMIC_CHAR_LOCK_FREE 2
+#define ATOMIC_CHAR16_T_LOCK_FREE 2
+#define ATOMIC_CHAR32_T_LOCK_FREE 2
+#define ATOMIC_WCHAR_T_LOCK_FREE 2
+#define ATOMIC_SHORT_LOCK_FREE 2
+#define ATOMIC_INT_LOCK_FREE 2
+#define ATOMIC_LONG_LOCK_FREE 2
+#define ATOMIC_LLONG_LOCK_FREE 2
+#define ATOMIC_POINTER_LOCK_FREE 2
+
+    static inline void __octa_atomic_thread_fence(MemoryOrder ord) {
+        if (ord > memory_order_consume) _ReadWriteBarrier();
+        if (ord == memory_order_seq_cst) {
+            MemoryBarrier();
+            if (ord > memory_order_consume) _ReadWriteBarrier();
+        }
+    }
+
+    static inline void __octa_atomic_signal_fence(MemoryOrder ord) {
+        if (ord > memory_order_consume) _ReadWriteBarrier();
+    }
+
+    static inline bool __octa_atomic_is_lock_free(size_t size) {
+        return size <= sizeof(void *);
+    }
+
+#define __OCTA_MSVC_ATOMIC_STORE_N(n, dst, src, ord) \
+    if (ord == memory_order_seq_cst) { \
+        InterlockedExchange##n((volatile int##n##_t *)(dst), \
+            (int##n##_t)(src)); \
+    } else { \
+        *(dst) = (src); \
+    }
+
+#define __OCTA_MSVC_ATOMIC_STORE_32(dst, src, ord) \
+    if (ord == memory_order_seq_cst) { \
+        InterlockedExchange((volatile int32_t *)(dst), (int32_t)(src)); \
+    } else { \
+        *(dst) = (src); \
+    }
+
+#define __OCTA_MSVC_ATOMIC_STORE_64(dst, src, ord) \
+    if (ord == memory_order_relaxed) { \
+        InterlockedExchangeNoFence64((volatile int64_t *)(dst), \
+            (int64_t)(src)); \
+    } else { \
+        InterlockedExchange64((volatile int64_t *)(dst), (int64_t)(src)); \
+    }
+
+/* dispatch on operand size; anything above 8 bytes is unsupported */
+#define __OCTA_MSVC_ATOMIC_STORE(dst, src, ord) \
+    if (sizeof(*dst) == 1) { \
+        __OCTA_MSVC_ATOMIC_STORE_N(8, dst, src, ord); \
+    } else if (sizeof(*dst) == 2) { \
+        __OCTA_MSVC_ATOMIC_STORE_N(16, dst, src, ord); \
+    } else if (sizeof(*dst) == 4) { \
+        __OCTA_MSVC_ATOMIC_STORE_32(dst, src, ord); \
+    } else if (sizeof(*dst) == 8) { \
+        __OCTA_MSVC_ATOMIC_STORE_64(dst, src, ord); \
+    } else { \
+        abort(); \
+    }
+
+    template<typename T>
+    static inline void __octa_atomic_store(volatile __OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder ord) {
+        __OCTA_MSVC_ATOMIC_STORE(&a->value, v, ord);
+    }
+
+    template<typename T>
+    static inline void __octa_atomic_store(__OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder ord) {
+        __OCTA_MSVC_ATOMIC_STORE(&a->value, v, ord);
+    }
+
+#undef __OCTA_MSVC_ATOMIC_STORE_N
+#undef __OCTA_MSVC_ATOMIC_STORE_32
+#undef __OCTA_MSVC_ATOMIC_STORE_64
+#undef __OCTA_MSVC_ATOMIC_STORE
+
+/* plain aligned loads of up to 4 bytes are atomic on x86 */
+#define __OCTA_MSVC_ATOMIC_LOAD_N(src, dst, ord) \
+    *(dst) = *(src);
+
+#define __OCTA_MSVC_ATOMIC_LOAD_64(src, dst, ord) \
+    __pragma(warning(push)) \
+    __pragma(warning(disable:4047)) \
+    *(dst) = InterlockedOr64((volatile int64_t *)(src), 0); \
+    __pragma(warning(pop))
+
+#define __OCTA_MSVC_ATOMIC_LOAD(src, dst, ord) \
+    if (sizeof(*dst) == 1 || sizeof(*dst) == 2 || sizeof(*dst) == 4) { \
+        __OCTA_MSVC_ATOMIC_LOAD_N(src, dst, ord); \
+    } else if (sizeof(*dst) == 8) { \
+        __OCTA_MSVC_ATOMIC_LOAD_64(src, dst, ord); \
+    } else { \
+        abort(); \
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_load(volatile __OctaAtomicBase<T> *a,
+                                       MemoryOrder ord) {
+        T r;
+        __OCTA_MSVC_ATOMIC_LOAD(&a->value, &r, ord);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_load(__OctaAtomicBase<T> *a,
+                                       MemoryOrder ord) {
+        T r;
+        __OCTA_MSVC_ATOMIC_LOAD(&a->value, &r, ord);
+        return r;
+    }
+
+#undef __OCTA_MSVC_ATOMIC_LOAD_N
+#undef __OCTA_MSVC_ATOMIC_LOAD_64
+#undef __OCTA_MSVC_ATOMIC_LOAD
+
+#define __OCTA_MSVC_ATOMIC_EXCHANGE(dst, src, ret) \
+    if (sizeof(*dst) == 1) { \
+        *(ret) = InterlockedExchange8((volatile int8_t *)(dst), (int8_t)(src)); \
+    } else if (sizeof(*dst) == 2) { \
+        *(ret) = InterlockedExchange16((volatile int16_t *)(dst), (int16_t)(src)); \
+    } else if (sizeof(*dst) == 4) { \
+        *(ret) = InterlockedExchange((volatile int32_t *)(dst), (int32_t)(src)); \
+    } else if (sizeof(*dst) == 8) { \
+        *(ret) = InterlockedExchange64((volatile int64_t *)(dst), (int64_t)(src)); \
+    } else { \
+        abort(); \
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_exchange(volatile __OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_EXCHANGE(&a->value, v, &r);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_exchange(__OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_EXCHANGE(&a->value, v, &r);
+        return r;
+    }
+
+#undef __OCTA_MSVC_ATOMIC_EXCHANGE
+
+    static inline bool __octa_msvc_atomic_compare_exchange8(
+        volatile int8_t *dst, int8_t *exp, int8_t src
+    ) {
+        int8_t prev = _InterlockedCompareExchange8(dst, src, *exp);
+        if (prev == *exp) return true;
+        *exp = prev; return false;
+    }
+
+    static inline bool __octa_msvc_atomic_compare_exchange16(
+        volatile int16_t *dst, int16_t *exp, int16_t src
+    ) {
+        int16_t prev = InterlockedCompareExchange16(dst, src, *exp);
+        if (prev == *exp) return true;
+        *exp = prev; return false;
+    }
+
+    static inline bool __octa_msvc_atomic_compare_exchange32(
+        volatile int32_t *dst, int32_t *exp, int32_t src
+    ) {
+        int32_t prev = InterlockedCompareExchange(dst, src, *exp);
+        if (prev == *exp) return true;
+        *exp = prev; return false;
+    }
+
+    static inline bool __octa_msvc_atomic_compare_exchange64(
+        volatile int64_t *dst, int64_t *exp, int64_t src
+    ) {
+        int64_t prev = InterlockedCompareExchange64(dst, src, *exp);
+        if (prev == *exp) return true;
+        *exp = prev; return false;
+    }
+
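+    /* Usage sketch (illustrative only): the helpers follow the usual CAS
+     * protocol, returning true on success and writing the observed value
+     * back through "exp" on failure, so a retry loop can recompute:
+     *
+     *     volatile int32_t word = 0;
+     *     int32_t expected = 0;
+     *     while (!__octa_msvc_atomic_compare_exchange32(&word, &expected,
+     *                                                   expected + 1)) {}
+     */
+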
+    /* atomic operations with size > 8 bytes are not supported */
+    static inline bool __octa_msvc_atomic_compare_exchange_unreachable() {
+        abort();
+        return false;
+    }
+
+#define __OCTA_MSVC_ATOMIC_COMPARE_EXCHANGE(dst, exp, src) \
+    ((sizeof(*dst) == 1) \
+        ? __octa_msvc_atomic_compare_exchange8((volatile int8_t *)(dst), \
+              (int8_t *)(exp), (int8_t)(src)) \
+    : ((sizeof(*dst) == 2) \
+        ? __octa_msvc_atomic_compare_exchange16((volatile int16_t *)(dst), \
+              (int16_t *)(exp), (int16_t)(src)) \
+    : ((sizeof(*dst) == 4) \
+        ? __octa_msvc_atomic_compare_exchange32((volatile int32_t *)(dst), \
+              (int32_t *)(exp), (int32_t)(src)) \
+    : ((sizeof(*dst) == 8) \
+        ? __octa_msvc_atomic_compare_exchange64((volatile int64_t *)(dst), \
+              (int64_t *)(exp), (int64_t)(src)) \
+    : __octa_msvc_atomic_compare_exchange_unreachable()))))
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_strong(
+        volatile __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder, MemoryOrder
+    ) {
+        return __OCTA_MSVC_ATOMIC_COMPARE_EXCHANGE(&a->value, expected, v);
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_strong(
+        __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder, MemoryOrder
+    ) {
+        return __OCTA_MSVC_ATOMIC_COMPARE_EXCHANGE(&a->value, expected, v);
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_weak(
+        volatile __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder, MemoryOrder
+    ) {
+        return __OCTA_MSVC_ATOMIC_COMPARE_EXCHANGE(&a->value, expected, v);
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_weak(
+        __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder, MemoryOrder
+    ) {
+        return __OCTA_MSVC_ATOMIC_COMPARE_EXCHANGE(&a->value, expected, v);
+    }
+
+#undef __OCTA_MSVC_ATOMIC_COMPARE_EXCHANGE
+
+#define __OCTA_MSVC_ATOMIC_FETCH_OP(opn, dst, val, ret) \
+    if (sizeof(*dst) == 1) { \
+        *(ret) = _InterlockedExchange##opn##8((volatile int8_t *)dst, (int8_t)val); \
+    } else if (sizeof(*dst) == 2) { \
+        *(ret) = _InterlockedExchange##opn##16((volatile int16_t *)dst, (int16_t)val); \
+    } else if (sizeof(*dst) == 4) { \
+        *(ret) = InterlockedExchange##opn((volatile int32_t *)dst, (int32_t)val); \
+    } else if (sizeof(*dst) == 8) { \
+        *(ret) = _InterlockedExchange##opn##64((volatile int64_t *)dst, (int64_t)val); \
+    } else { \
+        abort(); \
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_add(volatile __OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Add, &a->value, d, &r);
+        return r;
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_add(__OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Add, &a->value, d, &r);
+        return r;
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_sub(volatile __OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder) {
+        T r;
+        /* no InterlockedExchangeSub family exists; add the negated value */
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Add, &a->value, -(d), &r);
+        return r;
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_sub(__OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder) {
+        T r;
+        /* no InterlockedExchangeSub family exists; add the negated value */
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Add, &a->value, -(d), &r);
+        return r;
+    }
+
+#undef __OCTA_MSVC_ATOMIC_FETCH_OP
+
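+    /* Usage sketch (illustrative only): the fetch operations return the
+     * value held before the modification:
+     *
+     *     __OctaAtomicBase<int32_t> c(0);
+     *     int32_t old = __octa_atomic_fetch_add(&c, 1, memory_order_seq_cst);
+     *     // old == 0, c.value == 1
+     */
+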
+#define __OCTA_MSVC_ATOMIC_FETCH_OP(opn, dst, val, ret) \
+    if (sizeof(*dst) == 1) { \
+        *(ret) = Interlocked##opn##8((volatile int8_t *)dst, (int8_t)val); \
+    } else if (sizeof(*dst) == 2) { \
+        *(ret) = Interlocked##opn##16((volatile int16_t *)dst, (int16_t)val); \
+    } else if (sizeof(*dst) == 4) { \
+        *(ret) = Interlocked##opn((volatile int32_t *)dst, (int32_t)val); \
+    } else if (sizeof(*dst) == 8) { \
+        *(ret) = Interlocked##opn##64((volatile int64_t *)dst, (int64_t)val); \
+    } else { \
+        abort(); \
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_and(volatile __OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(And, &a->value, pattern, &r);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_and(__OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(And, &a->value, pattern, &r);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_or(volatile __OctaAtomicBase<T> *a,
+                                           T pattern, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Or, &a->value, pattern, &r);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_or(__OctaAtomicBase<T> *a,
+                                           T pattern, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Or, &a->value, pattern, &r);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_xor(volatile __OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Xor, &a->value, pattern, &r);
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_xor(__OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder) {
+        T r;
+        __OCTA_MSVC_ATOMIC_FETCH_OP(Xor, &a->value, pattern, &r);
+        return r;
+    }
+
+#undef __OCTA_MSVC_ATOMIC_FETCH_OP
+
+#else
+
+    /* GCC, Clang support
+     *
+     * libc++ used for reference
+     */
+
+#ifdef __GNUC__
+
+#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
+#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
+#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
+#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
+#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
+
+    static inline constexpr int __octa_to_gcc_order(MemoryOrder ord) {
+        return ((ord == memory_order_relaxed) ? __ATOMIC_RELAXED :
+               ((ord == memory_order_acquire) ? __ATOMIC_ACQUIRE :
+               ((ord == memory_order_release) ? __ATOMIC_RELEASE :
+               ((ord == memory_order_seq_cst) ? __ATOMIC_SEQ_CST :
+               ((ord == memory_order_acq_rel) ? __ATOMIC_ACQ_REL :
+                                                __ATOMIC_CONSUME)))));
+    }
+
+    static inline constexpr int __octa_to_gcc_failure_order(MemoryOrder ord) {
+        return ((ord == memory_order_relaxed) ? __ATOMIC_RELAXED :
+               ((ord == memory_order_acquire) ? __ATOMIC_ACQUIRE :
+               ((ord == memory_order_release) ? __ATOMIC_RELAXED :
+               ((ord == memory_order_seq_cst) ? __ATOMIC_SEQ_CST :
+               ((ord == memory_order_acq_rel) ? __ATOMIC_ACQUIRE :
+                                                __ATOMIC_CONSUME)))));
+    }
+
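+    /* The failure mapping differs from the success mapping only where an
+     * order implies a store: a failed compare-exchange performs just a
+     * load, so release demotes to relaxed and acq_rel to acquire. Both
+     * mappings are constexpr, so this can be checked statically:
+     *
+     *     static_assert(__octa_to_gcc_failure_order(memory_order_release)
+     *         == __ATOMIC_RELAXED, "no release semantics on failure");
+     */
+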
+    static inline void __octa_atomic_thread_fence(MemoryOrder ord) {
+        __atomic_thread_fence(__octa_to_gcc_order(ord));
+    }
+
+    static inline void __octa_atomic_signal_fence(MemoryOrder ord) {
+        __atomic_signal_fence(__octa_to_gcc_order(ord));
+    }
+
+    static inline bool __octa_atomic_is_lock_free(size_t size) {
+        /* return __atomic_is_lock_free(size, 0); cannot be used on some platforms */
+        return size <= sizeof(void *);
+    }
+
+    template<typename T>
+    static inline void __octa_atomic_store(volatile __OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder ord) {
+        __atomic_store(&a->value, &v, __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline void __octa_atomic_store(__OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder ord) {
+        __atomic_store(&a->value, &v, __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_load(volatile __OctaAtomicBase<T> *a,
+                                       MemoryOrder ord) {
+        T r;
+        __atomic_load(&a->value, &r, __octa_to_gcc_order(ord));
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_load(__OctaAtomicBase<T> *a,
+                                       MemoryOrder ord) {
+        T r;
+        __atomic_load(&a->value, &r, __octa_to_gcc_order(ord));
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_exchange(volatile __OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder ord) {
+        T r;
+        __atomic_exchange(&a->value, &v, &r, __octa_to_gcc_order(ord));
+        return r;
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_exchange(__OctaAtomicBase<T> *a,
+                                           T v, MemoryOrder ord) {
+        T r;
+        __atomic_exchange(&a->value, &v, &r, __octa_to_gcc_order(ord));
+        return r;
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_strong(
+        volatile __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder success, MemoryOrder failure
+    ) {
+        return __atomic_compare_exchange(&a->value, expected, &v, false,
+            __octa_to_gcc_order(success), __octa_to_gcc_failure_order(failure));
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_strong(
+        __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder success, MemoryOrder failure
+    ) {
+        return __atomic_compare_exchange(&a->value, expected, &v, false,
+            __octa_to_gcc_order(success), __octa_to_gcc_failure_order(failure));
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_weak(
+        volatile __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder success, MemoryOrder failure
+    ) {
+        return __atomic_compare_exchange(&a->value, expected, &v, true,
+            __octa_to_gcc_order(success), __octa_to_gcc_failure_order(failure));
+    }
+
+    template<typename T>
+    static inline bool __octa_atomic_compare_exchange_weak(
+        __OctaAtomicBase<T> *a, T *expected, T v,
+        MemoryOrder success, MemoryOrder failure
+    ) {
+        return __atomic_compare_exchange(&a->value, expected, &v, true,
+            __octa_to_gcc_order(success), __octa_to_gcc_failure_order(failure));
+    }
+
+    template<typename T>
+    struct __OctaSkipAmt { static constexpr size_t value = 1; };
+
+    template<typename T>
+    struct __OctaSkipAmt<T *> { static constexpr size_t value = sizeof(T); };
+
+    template<typename T> struct __OctaSkipAmt<T[]> {};
+    template<typename T, size_t N> struct __OctaSkipAmt<T[N]> {};
+
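+    /* Illustrative sketch: the skip amount makes the fetch operations on
+     * pointers step in whole elements, matching ordinary pointer
+     * arithmetic (the underlying builtins count raw bytes for pointers):
+     *
+     *     int buf[4];
+     *     __OctaAtomicBase<int *> p(buf);
+     *     __octa_atomic_fetch_add(&p, 1, memory_order_seq_cst);
+     *     // p.value == buf + 1, i.e. sizeof(int) bytes further
+     */
+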
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_add(volatile __OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder ord) {
+        return __atomic_fetch_add(&a->value, d * __OctaSkipAmt<T>::value,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_add(__OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder ord) {
+        return __atomic_fetch_add(&a->value, d * __OctaSkipAmt<T>::value,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_sub(volatile __OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder ord) {
+        return __atomic_fetch_sub(&a->value, d * __OctaSkipAmt<T>::value,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T, typename U>
+    static inline T __octa_atomic_fetch_sub(__OctaAtomicBase<T> *a,
+                                            U d, MemoryOrder ord) {
+        return __atomic_fetch_sub(&a->value, d * __OctaSkipAmt<T>::value,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_and(volatile __OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder ord) {
+        return __atomic_fetch_and(&a->value, pattern,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_and(__OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder ord) {
+        return __atomic_fetch_and(&a->value, pattern,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_or(volatile __OctaAtomicBase<T> *a,
+                                           T pattern, MemoryOrder ord) {
+        return __atomic_fetch_or(&a->value, pattern,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_or(__OctaAtomicBase<T> *a,
+                                           T pattern, MemoryOrder ord) {
+        return __atomic_fetch_or(&a->value, pattern,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_xor(volatile __OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder ord) {
+        return __atomic_fetch_xor(&a->value, pattern,
+            __octa_to_gcc_order(ord));
+    }
+
+    template<typename T>
+    static inline T __octa_atomic_fetch_xor(__OctaAtomicBase<T> *a,
+                                            T pattern, MemoryOrder ord) {
+        return __atomic_fetch_xor(&a->value, pattern,
+            __octa_to_gcc_order(ord));
+    }
+#else
+# error Unsupported compiler
+#endif
+#endif
+
+    template<typename T> inline T kill_dependency(T v) noexcept {
+        return v;
+    }
+
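+    /* A minimal sketch of the wrapper API defined below (illustrative):
+     *
+     *     octa::Atomic<int> a(5);
+     *     int old = a.exchange(7);           // old == 5
+     *     int exp = 7;
+     *     a.compare_exchange_strong(exp, 9); // true, a.load() == 9
+     */
+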
+    template<typename T, bool = IsIntegral<T>::value && !IsSame<T, bool>::value>
+    struct __OctaAtomic {
+        mutable __OctaAtomicBase<T> p_a;
+
+        __OctaAtomic() noexcept = default;
+
+        constexpr __OctaAtomic(T v) noexcept: p_a(v) {}
+
+        __OctaAtomic(const __OctaAtomic &) = delete;
+
+        __OctaAtomic &operator=(const __OctaAtomic &) = delete;
+        __OctaAtomic &operator=(const __OctaAtomic &) volatile = delete;
+
+        bool is_lock_free() const volatile noexcept {
+            return __octa_atomic_is_lock_free(sizeof(T));
+        }
+
+        bool is_lock_free() const noexcept {
+            return __octa_atomic_is_lock_free(sizeof(T));
+        }
+
+        void store(T v, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            __octa_atomic_store(&p_a, v, ord);
+        }
+
+        void store(T v, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            __octa_atomic_store(&p_a, v, ord);
+        }
+
+        T load(MemoryOrder ord = memory_order_seq_cst) const volatile noexcept {
+            return __octa_atomic_load(&p_a, ord);
+        }
+
+        T load(MemoryOrder ord = memory_order_seq_cst) const noexcept {
+            return __octa_atomic_load(&p_a, ord);
+        }
+
+        operator T() const volatile noexcept { return load(); }
+        operator T() const noexcept { return load(); }
+
+        T exchange(T v, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_exchange(&p_a, v, ord);
+        }
+
+        T exchange(T v, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_exchange(&p_a, v, ord);
+        }
+
+        bool compare_exchange_weak(T &e, T v, MemoryOrder s, MemoryOrder f)
+        volatile noexcept {
+            return __octa_atomic_compare_exchange_weak(&p_a, &e, v, s, f);
+        }
+
+        bool compare_exchange_weak(T &e, T v, MemoryOrder s, MemoryOrder f)
+        noexcept {
+            return __octa_atomic_compare_exchange_weak(&p_a, &e, v, s, f);
+        }
+
+        bool compare_exchange_strong(T &e, T v, MemoryOrder s, MemoryOrder f)
+        volatile noexcept {
+            return __octa_atomic_compare_exchange_strong(&p_a, &e, v, s, f);
+        }
+
+        bool compare_exchange_strong(T &e, T v, MemoryOrder s, MemoryOrder f)
+        noexcept {
+            return __octa_atomic_compare_exchange_strong(&p_a, &e, v, s, f);
+        }
+
+        bool compare_exchange_weak(T &e, T v, MemoryOrder ord
+                                                  = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_compare_exchange_weak(&p_a, &e, v, ord, ord);
+        }
+
+        bool compare_exchange_weak(T &e, T v, MemoryOrder ord
+                                                  = memory_order_seq_cst)
+        noexcept {
+            return __octa_atomic_compare_exchange_weak(&p_a, &e, v, ord, ord);
+        }
+
+        bool compare_exchange_strong(T &e, T v, MemoryOrder ord
+                                                    = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_compare_exchange_strong(&p_a, &e, v, ord, ord);
+        }
+
+        bool compare_exchange_strong(T &e, T v, MemoryOrder ord
+                                                    = memory_order_seq_cst)
+        noexcept {
+            return __octa_atomic_compare_exchange_strong(&p_a, &e, v, ord, ord);
+        }
+    };
+
+    template<typename T>
+    struct __OctaAtomic<T, true>: __OctaAtomic<T, false> {
+        typedef __OctaAtomic<T, false> base_t;
+
+        __OctaAtomic() noexcept = default;
+
+        constexpr __OctaAtomic(T v) noexcept: base_t(v) {}
+
+        T fetch_add(T op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_add(&this->p_a, op, ord);
+        }
+
+        T fetch_add(T op, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_fetch_add(&this->p_a, op, ord);
+        }
+
+        T fetch_sub(T op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_sub(&this->p_a, op, ord);
+        }
+
+        T fetch_sub(T op, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_fetch_sub(&this->p_a, op, ord);
+        }
+
+        T fetch_and(T op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_and(&this->p_a, op, ord);
+        }
+
+        T fetch_and(T op, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_fetch_and(&this->p_a, op, ord);
+        }
+
+        T fetch_or(T op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_or(&this->p_a, op, ord);
+        }
+
+        T fetch_or(T op, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_fetch_or(&this->p_a, op, ord);
+        }
+
+        T fetch_xor(T op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_xor(&this->p_a, op, ord);
+        }
+
+        T fetch_xor(T op, MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_fetch_xor(&this->p_a, op, ord);
+        }
+
+        T operator++(int) volatile noexcept { return fetch_add(T(1)); }
+        T operator++(int)          noexcept { return fetch_add(T(1)); }
+        T operator--(int) volatile noexcept { return fetch_sub(T(1)); }
+        T operator--(int)          noexcept { return fetch_sub(T(1)); }
+        T operator++(   ) volatile noexcept { return fetch_add(T(1)) + T(1); }
+        T operator++(   )          noexcept { return fetch_add(T(1)) + T(1); }
+        T operator--(   ) volatile noexcept { return fetch_sub(T(1)) - T(1); }
+        T operator--(   )          noexcept { return fetch_sub(T(1)) - T(1); }
+
+        T operator+=(T op) volatile noexcept { return fetch_add(op) + op; }
+        T operator+=(T op)          noexcept { return fetch_add(op) + op; }
+        T operator-=(T op) volatile noexcept { return fetch_sub(op) - op; }
+        T operator-=(T op)          noexcept { return fetch_sub(op) - op; }
+        T operator&=(T op) volatile noexcept { return fetch_and(op) & op; }
+        T operator&=(T op)          noexcept { return fetch_and(op) & op; }
+        T operator|=(T op) volatile noexcept { return fetch_or (op) | op; }
+        T operator|=(T op)          noexcept { return fetch_or (op) | op; }
+        T operator^=(T op) volatile noexcept { return fetch_xor(op) ^ op; }
+        T operator^=(T op)          noexcept { return fetch_xor(op) ^ op; }
+    };
+
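+    /* Illustrative sketch: the operators are built on fetch_add and
+     * friends, so the postfix forms return the old value while the prefix
+     * forms re-apply the step to the fetched result:
+     *
+     *     octa::Atomic<int> n(0);
+     *     int a = n++; // a == 0, n == 1
+     *     int b = ++n; // b == 2, n == 2
+     */
+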
+    template<typename T>
+    struct Atomic: __OctaAtomic<T> {
+        typedef __OctaAtomic<T> base_t;
+
+        Atomic() noexcept = default;
+
+        constexpr Atomic(T v) noexcept: base_t(v) {}
+
+        T operator=(T v) volatile noexcept {
+            base_t::store(v); return v;
+        }
+
+        T operator=(T v) noexcept {
+            base_t::store(v); return v;
+        }
+    };
+
+    template<typename T>
+    struct Atomic<T *>: __OctaAtomic<T *> {
+        typedef __OctaAtomic<T *> base_t;
+
+        Atomic() noexcept = default;
+
+        constexpr Atomic(T *v) noexcept: base_t(v) {}
+
+        T *operator=(T *v) volatile noexcept {
+            base_t::store(v); return v;
+        }
+
+        T *operator=(T *v) noexcept {
+            base_t::store(v); return v;
+        }
+
+        T *fetch_add(ptrdiff_t op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_add(&this->p_a, op, ord);
+        }
+
+        T *fetch_add(ptrdiff_t op, MemoryOrder ord = memory_order_seq_cst)
+        noexcept {
+            return __octa_atomic_fetch_add(&this->p_a, op, ord);
+        }
+
+        T *fetch_sub(ptrdiff_t op, MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_fetch_sub(&this->p_a, op, ord);
+        }
+
+        T *fetch_sub(ptrdiff_t op, MemoryOrder ord = memory_order_seq_cst)
+        noexcept {
+            return __octa_atomic_fetch_sub(&this->p_a, op, ord);
+        }
+
+        T *operator++(int) volatile noexcept { return fetch_add(1); }
+        T *operator++(int)          noexcept { return fetch_add(1); }
+        T *operator--(int) volatile noexcept { return fetch_sub(1); }
+        T *operator--(int)          noexcept { return fetch_sub(1); }
+        T *operator++(   ) volatile noexcept { return fetch_add(1) + 1; }
+        T *operator++(   )          noexcept { return fetch_add(1) + 1; }
+        T *operator--(   ) volatile noexcept { return fetch_sub(1) - 1; }
+        T *operator--(   )          noexcept { return fetch_sub(1) - 1; }
+
+        T *operator+=(ptrdiff_t op) volatile noexcept { return fetch_add(op) + op; }
+        T *operator+=(ptrdiff_t op)          noexcept { return fetch_add(op) + op; }
+        T *operator-=(ptrdiff_t op) volatile noexcept { return fetch_sub(op) - op; }
+        T *operator-=(ptrdiff_t op)          noexcept { return fetch_sub(op) - op; }
+    };
+
+    template<typename T>
+    inline bool atomic_is_lock_free(const volatile Atomic<T> *a) noexcept {
+        return a->is_lock_free();
+    }
+
+    template<typename T>
+    inline bool atomic_is_lock_free(const Atomic<T> *a) noexcept {
+        return a->is_lock_free();
+    }
+
+    template<typename T>
+    inline void atomic_init(volatile Atomic<T> *a, T v) noexcept {
+        __octa_atomic_init(&a->p_a, v);
+    }
+
+    template<typename T>
+    inline void atomic_init(Atomic<T> *a, T v) noexcept {
+        __octa_atomic_init(&a->p_a, v);
+    }
+
+    template<typename T>
+    inline void atomic_store(volatile Atomic<T> *a, T v) noexcept {
+        a->store(v);
+    }
+
+    template<typename T>
+    inline void atomic_store(Atomic<T> *a, T v) noexcept {
+        a->store(v);
+    }
+
+    template<typename T>
+    inline void atomic_store_explicit(volatile Atomic<T> *a, T v,
+                                      MemoryOrder ord)
+    noexcept {
+        a->store(v, ord);
+    }
+
+    template<typename T>
+    inline void atomic_store_explicit(Atomic<T> *a, T v, MemoryOrder ord)
+    noexcept {
+        a->store(v, ord);
+    }
+
+    template<typename T>
+    inline T atomic_load(const volatile Atomic<T> *a) noexcept {
+        return a->load();
+    }
+
+    template<typename T>
+    inline T atomic_load(const Atomic<T> *a) noexcept {
+        return a->load();
+    }
+
+    template<typename T>
+    inline T atomic_load_explicit(const volatile Atomic<T> *a,
+                                  MemoryOrder ord)
+    noexcept {
+        return a->load(ord);
+    }
+
+    template<typename T>
+    inline T atomic_load_explicit(const Atomic<T> *a, MemoryOrder ord)
+    noexcept {
+        return a->load(ord);
+    }
+
+    template<typename T>
+    inline T atomic_exchange(volatile Atomic<T> *a, T v) noexcept {
+        return a->exchange(v);
+    }
+
+    template<typename T>
+    inline T atomic_exchange(Atomic<T> *a, T v) noexcept {
+        return a->exchange(v);
+    }
+
+    template<typename T>
+    inline T atomic_exchange_explicit(volatile Atomic<T> *a, T v,
+                                      MemoryOrder ord)
+    noexcept {
+        return a->exchange(v, ord);
+    }
+
+    template<typename T>
+    inline T atomic_exchange_explicit(Atomic<T> *a, T v, MemoryOrder ord)
+    noexcept {
+        return a->exchange(v, ord);
+    }
+
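+    /* Usage sketch (illustrative only): the free functions mirror the
+     * member API, taking the atomic by pointer:
+     *
+     *     octa::Atomic<int> v(1);
+     *     octa::atomic_store(&v, 2);
+     *     int x = octa::atomic_exchange(&v, 3); // x == 2
+     */
+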
+    template<typename T>
+    inline bool atomic_compare_exchange_weak(volatile Atomic<T> *a, T *e, T v)
+    noexcept {
+        return a->compare_exchange_weak(*e, v);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_weak(Atomic<T> *a, T *e, T v) noexcept {
+        return a->compare_exchange_weak(*e, v);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_strong(volatile Atomic<T> *a, T *e, T v)
+    noexcept {
+        return a->compare_exchange_strong(*e, v);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_strong(Atomic<T> *a, T *e, T v)
+    noexcept {
+        return a->compare_exchange_strong(*e, v);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_weak_explicit(volatile Atomic<T> *a,
+                                                      T *e, T v,
+                                                      MemoryOrder s,
+                                                      MemoryOrder f)
+    noexcept {
+        return a->compare_exchange_weak(*e, v, s, f);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_weak_explicit(Atomic<T> *a, T *e, T v,
+                                                      MemoryOrder s,
+                                                      MemoryOrder f)
+    noexcept {
+        return a->compare_exchange_weak(*e, v, s, f);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_strong_explicit(volatile Atomic<T> *a,
+                                                        T *e, T v,
+                                                        MemoryOrder s,
+                                                        MemoryOrder f)
+    noexcept {
+        return a->compare_exchange_strong(*e, v, s, f);
+    }
+
+    template<typename T>
+    inline bool atomic_compare_exchange_strong_explicit(Atomic<T> *a, T *e, T v,
+                                                        MemoryOrder s,
+                                                        MemoryOrder f)
+    noexcept {
+        return a->compare_exchange_strong(*e, v, s, f);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_add(volatile Atomic<T> *a, T op) noexcept {
+        return a->fetch_add(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_add(Atomic<T> *a, T op) noexcept {
+        return a->fetch_add(op);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_add(volatile Atomic<T *> *a, ptrdiff_t op) noexcept {
+        return a->fetch_add(op);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_add(Atomic<T *> *a, ptrdiff_t op) noexcept {
+        return a->fetch_add(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_add_explicit(volatile Atomic<T> *a, T op, MemoryOrder ord)
+    noexcept {
+        return a->fetch_add(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_add_explicit(Atomic<T> *a, T op, MemoryOrder ord) noexcept {
+        return a->fetch_add(op, ord);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_add_explicit(volatile Atomic<T *> *a, ptrdiff_t op,
+                                        MemoryOrder ord)
+    noexcept {
+        return a->fetch_add(op, ord);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_add_explicit(Atomic<T *> *a, ptrdiff_t op,
+                                        MemoryOrder ord)
+    noexcept {
+        return a->fetch_add(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_sub(volatile Atomic<T> *a, T op) noexcept {
+        return a->fetch_sub(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_sub(Atomic<T> *a, T op) noexcept {
+        return a->fetch_sub(op);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_sub(volatile Atomic<T *> *a, ptrdiff_t op) noexcept {
+        return a->fetch_sub(op);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_sub(Atomic<T *> *a, ptrdiff_t op) noexcept {
+        return a->fetch_sub(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_sub_explicit(volatile Atomic<T> *a, T op, MemoryOrder ord)
+    noexcept {
+        return a->fetch_sub(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_sub_explicit(Atomic<T> *a, T op, MemoryOrder ord) noexcept {
+        return a->fetch_sub(op, ord);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_sub_explicit(volatile Atomic<T *> *a, ptrdiff_t op,
+                                        MemoryOrder ord)
+    noexcept {
+        return a->fetch_sub(op, ord);
+    }
+
+    template<typename T>
+    inline T *atomic_fetch_sub_explicit(Atomic<T *> *a, ptrdiff_t op,
+                                        MemoryOrder ord)
+    noexcept {
+        return a->fetch_sub(op, ord);
+    }
+
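+    /* Usage sketch (illustrative only): the _explicit variants take a
+     * MemoryOrder; a plain event counter can usually be bumped with
+     * relaxed ordering:
+     *
+     *     octa::Atomic<unsigned long> hits(0);
+     *     octa::atomic_fetch_add_explicit(&hits, 1ul, octa::memory_order_relaxed);
+     */
+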
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_and(volatile Atomic<T> *a, T op) noexcept {
+        return a->fetch_and(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_and(Atomic<T> *a, T op) noexcept {
+        return a->fetch_and(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_and_explicit(volatile Atomic<T> *a, T op, MemoryOrder ord)
+    noexcept {
+        return a->fetch_and(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_and_explicit(Atomic<T> *a, T op, MemoryOrder ord) noexcept {
+        return a->fetch_and(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_or(volatile Atomic<T> *a, T op) noexcept {
+        return a->fetch_or(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_or(Atomic<T> *a, T op) noexcept {
+        return a->fetch_or(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_or_explicit(volatile Atomic<T> *a, T op, MemoryOrder ord)
+    noexcept {
+        return a->fetch_or(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_or_explicit(Atomic<T> *a, T op, MemoryOrder ord) noexcept {
+        return a->fetch_or(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_xor(volatile Atomic<T> *a, T op) noexcept {
+        return a->fetch_xor(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_xor(Atomic<T> *a, T op) noexcept {
+        return a->fetch_xor(op);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_xor_explicit(volatile Atomic<T> *a, T op, MemoryOrder ord)
+    noexcept {
+        return a->fetch_xor(op, ord);
+    }
+
+    template<typename T>
+    inline EnableIf<IsIntegral<T>::value && !IsSame<T, bool>::value, T>
+    atomic_fetch_xor_explicit(Atomic<T> *a, T op, MemoryOrder ord) noexcept {
+        return a->fetch_xor(op, ord);
+    }
+
+    struct AtomicFlag {
+        __OctaAtomicBase<bool> p_a;
+
+        AtomicFlag() noexcept = default;
+
+        AtomicFlag(bool b) noexcept: p_a(b) {}
+
+        AtomicFlag(const AtomicFlag &) = delete;
+
+        AtomicFlag &operator=(const AtomicFlag &) = delete;
+        AtomicFlag &operator=(const AtomicFlag &) volatile = delete;
+
+        bool test_and_set(MemoryOrder ord = memory_order_seq_cst)
+        volatile noexcept {
+            return __octa_atomic_exchange(&p_a, true, ord);
+        }
+
+        bool test_and_set(MemoryOrder ord = memory_order_seq_cst) noexcept {
+            return __octa_atomic_exchange(&p_a, true, ord);
+        }
+
+        void clear(MemoryOrder ord = memory_order_seq_cst) volatile noexcept {
+            __octa_atomic_store(&p_a, false, ord);
+        }
+
+        void clear(MemoryOrder ord = memory_order_seq_cst) noexcept {
+            __octa_atomic_store(&p_a, false, ord);
+        }
+    };
+
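+    /* Illustrative sketch: AtomicFlag is the usual building block for a
+     * simple spinlock:
+     *
+     *     static octa::AtomicFlag flag = ATOMIC_FLAG_INIT;
+     *     while (flag.test_and_set(octa::memory_order_acquire)) {}
+     *     // ...critical section...
+     *     flag.clear(octa::memory_order_release);
+     */
+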
+    inline bool atomic_flag_test_and_set(volatile AtomicFlag *a)
+    noexcept {
+        return a->test_and_set();
+    }
+
+    inline bool atomic_flag_test_and_set(AtomicFlag *a) noexcept {
+        return a->test_and_set();
+    }
+
+    inline bool atomic_flag_test_and_set_explicit(volatile AtomicFlag *a,
+                                                  MemoryOrder ord)
+    noexcept {
+        return a->test_and_set(ord);
+    }
+
+    inline bool atomic_flag_test_and_set_explicit(AtomicFlag *a,
+                                                  MemoryOrder ord)
+    noexcept {
+        return a->test_and_set(ord);
+    }
+
+    inline void atomic_flag_clear(volatile AtomicFlag *a) noexcept {
+        a->clear();
+    }
+
+    inline void atomic_flag_clear(AtomicFlag *a) noexcept {
+        a->clear();
+    }
+
+    inline void atomic_flag_clear_explicit(volatile AtomicFlag *a,
+                                           MemoryOrder ord)
+    noexcept {
+        a->clear(ord);
+    }
+
+    inline void atomic_flag_clear_explicit(AtomicFlag *a, MemoryOrder ord)
+    noexcept {
+        a->clear(ord);
+    }
+
+    inline void atomic_thread_fence(MemoryOrder ord) noexcept {
+        __octa_atomic_thread_fence(ord);
+    }
+
+    inline void atomic_signal_fence(MemoryOrder ord) noexcept {
+        __octa_atomic_signal_fence(ord);
+    }
+
+    typedef Atomic<bool  > AtomicBool;
+    typedef Atomic<char  > AtomicChar;
+    typedef Atomic<schar > AtomicSchar;
+    typedef Atomic<uchar > AtomicUchar;
+    typedef Atomic<short > AtomicShort;
+    typedef Atomic<ushort> AtomicUshort;
+    typedef Atomic<int   > AtomicInt;
+    typedef Atomic<uint  > AtomicUint;
+    typedef Atomic<long  > AtomicLong;
+    typedef Atomic<ulong > AtomicUlong;
+    typedef Atomic<llong > AtomicLlong;
+    typedef Atomic<ullong> AtomicUllong;
+
+    typedef Atomic<char16_t> AtomicChar16;
+    typedef Atomic<char32_t> AtomicChar32;
+    typedef Atomic< wchar_t> AtomicWchar;
+
+    typedef Atomic< intptr_t> AtomicIntptr;
+    typedef Atomic<uintptr_t> AtomicUintptr;
+    typedef Atomic<   size_t> AtomicSize;
+    typedef Atomic<ptrdiff_t> AtomicPtrdiff;
+
+#define ATOMIC_FLAG_INIT {false}
+#define ATOMIC_VAR_INIT(__v) {__v}
+
+}
+
+#endif
\ No newline at end of file