/*
 * Per-platform memory-barrier selection for MSVC.
 * NOTE(review): this copy is fragmentary -- the surrounding #if/#elif/#endif
 * directives, the function braces, and the declaration of the local
 * `Barrier` variable appear to have been lost; restore from upstream.
 */
4#define CX_MemoryBarrier __faststorefence
5#define CX_ReadWriteBarrier _ReadWriteBarrier
/*
 * Fallback full fence: a locked read-modify-write of a dummy location
 * (OR with 0 leaves the value unchanged) -- presumably relied on as a
 * full barrier; confirm against the missing surrounding code.
 */
7__forceinline
void CX_MemoryBarrier(
void)
11 _InterlockedOr(&Barrier, 0);
14#define CX_ReadWriteBarrier _ReadWriteBarrier
/* No barrier strategy known for any other MSVC target. */
16#error "Don't know how to create atomics for this platform for MSVC."
19#include "cx/utils/macros.h"
21#define atomicInit(...) {__VA_ARGS__}
31typedef char _cx_atomic_repr_0_t;
32typedef short _cx_atomic_repr_1_t;
33typedef long _cx_atomic_repr_2_t;
34typedef __int64 _cx_atomic_repr_3_t;
/*
 * Runtime fence of the strength required by `mo`: compiler barrier,
 * then a hardware barrier only when the target/order combination needs
 * one, then another compiler barrier.
 * NOTE(review): fragmentary copy -- the return type, braces, the
 * hardware-barrier calls inside the if-bodies, and #endif appear to have
 * been lost; restore from upstream.
 * NOTE(review): "AtmoicMemoryOrder" is a typo for "AtomicMemoryOrder",
 * but it is used consistently file-wide (typedef not visible here);
 * renaming requires a coordinated file-wide change.
 */
37_atomicFence(AtmoicMemoryOrder mo)
39 CX_ReadWriteBarrier();
/* ARM targets: anything stronger than relaxed needs a hardware barrier. */
40# if defined(_M_ARM) || defined(_M_ARM64)
42 if (mo != ATOMIC_MO_Relaxed) {
/* x86/x64 targets: only seq_cst needs a full hardware barrier. */
45# elif defined(_M_IX86) || defined (_M_X64)
47 if (mo == ATOMIC_MO_SeqCst) {
51# error "Don't know how to create atomics for this platform for MSVC."
53 CX_ReadWriteBarrier();
/* Public spelling: pastes a bare order token onto the ATOMIC_MO_ enum. */
55#define atomicFence(order) _atomicFence(ATOMIC_MO_##order)
/*
 * FIX: stripped stray line-number artifacts fused onto these lines.
 */
/* Maps lg_size -> the Interlocked operand typedef declared above. */
#define CX_ATOMIC_IL_REPR(lg_size) _cx_atomic_repr_ ## lg_size ## _t

/*
 * Builds the name of the size-suffixed MSVC Interlocked intrinsic,
 * e.g. CX_ATOMIC_IL_NAME(_InterlockedExchange, 3) -> _InterlockedExchange64.
 */
#define CX_ATOMIC_IL_NAME(base_name, lg_size) tokconcat( \
    base_name, CX_ATOMIC_IL_SUFFIX(lg_size))

#define CX_ATOMIC_IL_SUFFIX(lg_size) \
    tokconcat(CX_ATOMIC_IL_SUFFIX_, lg_size)

/*
 * Intrinsic name suffix per size class. The lg_size 2 entry is
 * deliberately empty: the unsuffixed intrinsics (e.g. _InterlockedOr)
 * are the 32-bit (long) variants.
 */
#define CX_ATOMIC_IL_SUFFIX_0 8
#define CX_ATOMIC_IL_SUFFIX_1 16
#define CX_ATOMIC_IL_SUFFIX_2
#define CX_ATOMIC_IL_SUFFIX_3 64

/* Public spelling of an atomic type: atomic(foo) -> cx_atomic_foo. */
#define atomic(type) cx_atomic_##type
/*
 * FIX: stripped stray line-number artifacts fused onto these lines.
 *
 * Type-generic public API. `type` is the short type name the per-type
 * helpers were generated with (CX_GENERATE_*_ATOMICS), so
 * atomicLoad(foo, p, Relaxed) expands to _atomicLoad_foo(p,
 * ATOMIC_MO_Relaxed). `order` is a bare memory-order token (e.g.
 * Relaxed, Acquire, Release, SeqCst) pasted onto ATOMIC_MO_.
 */
#define atomicLoad(type, atomic_ptr, order) \
    _atomicLoad_##type(atomic_ptr, ATOMIC_MO_##order)
#define atomicStore(type, atomic_ptr, val, order) \
    _atomicStore_##type(atomic_ptr, val, ATOMIC_MO_##order)
#define atomicExchange(type, atomic_ptr, val, order) \
    _atomicExchange_##type(atomic_ptr, val, ATOMIC_MO_##order)
/*
 * `semantics` is weak or strong; on failure *expected_ptr receives the
 * observed value (see the CAS helpers in CX_GENERATE_ATOMICS).
 */
#define atomicCompareExchange(type, semantics, atomic_ptr, expected_ptr,\
    desired, successorder, failorder) \
    _atomicCompareExchange_##semantics##_##type(atomic_ptr, \
        expected_ptr, desired, \
        ATOMIC_MO_##successorder, ATOMIC_MO_##failorder)
/* Integer-only fetch-ops; generated by CX_GENERATE_INT_ATOMICS. */
#define atomicFetchAdd(type, atomic_ptr, val, order) \
    _atomicFetchAdd_##type(atomic_ptr, val, ATOMIC_MO_##order)
#define atomicFetchSub(type, atomic_ptr, val, order) \
    _atomicFetchSub_##type(atomic_ptr, val, ATOMIC_MO_##order)
#define atomicFetchAnd(type, atomic_ptr, val, order) \
    _atomicFetchAnd_##type(atomic_ptr, val, ATOMIC_MO_##order)
#define atomicFetchOr(type, atomic_ptr, val, order) \
    _atomicFetchOr_##type(atomic_ptr, val, ATOMIC_MO_##order)
#define atomicFetchXor(type, atomic_ptr, val, order) \
    _atomicFetchXor_##type(atomic_ptr, val, ATOMIC_MO_##order)
/*
 * CX_GENERATE_ATOMICS(type, short_type, lg_size)
 * Emits the wrapper struct cx_atomic_##short_type plus load, store,
 * exchange, and weak/strong compare-exchange helpers built on MSVC's
 * size-suffixed _Interlocked* intrinsics.
 *
 * NOTE(review): fragmentary copy -- the struct header, the inline
 * function headers/return types, closing braces, return statements, and
 * the CAS success/failure handling lines appear to have been dropped;
 * restore from upstream before building.
 *
 * Visible structure: load/store are plain accesses bracketed by explicit
 * _atomicFence calls keyed on `mo`; exchange/CAS ignore `mo` --
 * presumably because the Interlocked intrinsics are already full
 * barriers (confirm). _InterlockedCompareExchange cannot fail
 * spuriously, so "strong" CAS simply forwards to the "weak" helper.
 */
94#define CX_GENERATE_ATOMICS(type, short_type, lg_size) \
 96 CX_ATOMIC_IL_REPR(lg_size) repr; /* Interlocked-width storage */ \
97} cx_atomic_##short_type; \
100_atomicLoad_##short_type(const cx_atomic_##short_type *a, \
101 AtmoicMemoryOrder mo) { \
102 CX_ATOMIC_IL_REPR(lg_size) ret = a->repr; \
103 if (mo != ATOMIC_MO_Relaxed) { \
104 _atomicFence(ATOMIC_MO_Acquire); \
110_atomicStore_##short_type(cx_atomic_##short_type *a, \
111 type val, AtmoicMemoryOrder mo) { \
112 if (mo != ATOMIC_MO_Relaxed) { \
113 _atomicFence(ATOMIC_MO_Release); \
115 a->repr = (CX_ATOMIC_IL_REPR(lg_size)) val; \
116 if (mo == ATOMIC_MO_SeqCst) { \
117 _atomicFence(ATOMIC_MO_SeqCst); \
122_atomicExchange_##short_type(cx_atomic_##short_type *a, type val, \
123 AtmoicMemoryOrder mo) { \
124 return (type)CX_ATOMIC_IL_NAME(_InterlockedExchange, \
125 lg_size)(&a->repr, (CX_ATOMIC_IL_REPR(lg_size))val); \
129_atomicCompareExchange_weak_##short_type(cx_atomic_##short_type *a, \
130 type *expected, type desired, AtmoicMemoryOrder success_mo, \
131 AtmoicMemoryOrder failure_mo) { \
132 CX_ATOMIC_IL_REPR(lg_size) e = \
133 (CX_ATOMIC_IL_REPR(lg_size))*expected; \
134 CX_ATOMIC_IL_REPR(lg_size) d = \
135 (CX_ATOMIC_IL_REPR(lg_size))desired; \
136 CX_ATOMIC_IL_REPR(lg_size) old = \
137 CX_ATOMIC_IL_NAME(_InterlockedCompareExchange, \
138 lg_size)(&a->repr, d, e); \
142 *expected = (type)old; /* report the observed value on failure */ \
148_atomicCompareExchange_strong_##short_type(cx_atomic_##short_type *a, \
149 type *expected, type desired, AtmoicMemoryOrder success_mo, \
150 AtmoicMemoryOrder failure_mo) { \
152 return _atomicCompareExchange_weak_##short_type(a, expected, \
153 desired, success_mo, failure_mo); \
/*
 * CX_EXTERN_ATOMICS(type, short_type)
 * Declares prototypes matching the helpers emitted by
 * CX_GENERATE_ATOMICS, for translation units that only need the
 * declarations.
 * NOTE(review): fragmentary copy -- the return-type/linkage specifier
 * line preceding each declarator appears to have been dropped; restore
 * from upstream.
 */
156#define CX_EXTERN_ATOMICS(type, short_type) \
158_atomicLoad_##short_type(const cx_atomic_##short_type *a, \
159 AtmoicMemoryOrder mo); \
162_atomicStore_##short_type(cx_atomic_##short_type *a, \
163 type val, AtmoicMemoryOrder mo); \
166_atomicExchange_##short_type(cx_atomic_##short_type *a, type val, \
167 AtmoicMemoryOrder mo); \
170_atomicCompareExchange_weak_##short_type(cx_atomic_##short_type *a, \
171 type *expected, type desired, AtmoicMemoryOrder success_mo, \
172 AtmoicMemoryOrder failure_mo); \
175_atomicCompareExchange_strong_##short_type(cx_atomic_##short_type *a, \
176 type *expected, type desired, AtmoicMemoryOrder success_mo, \
177 AtmoicMemoryOrder failure_mo);
/*
 * CX_GENERATE_INT_ATOMICS(type, short_type, lg_size)
 * Everything CX_GENERATE_ATOMICS emits, plus integer fetch-ops
 * (add/sub/and/or/xor) mapped onto the Interlocked RMW intrinsics.
 *
 * NOTE(review): fragmentary copy -- function headers/return types and
 * closing braces appear to have been dropped; restore from upstream.
 *
 * FetchSub is FetchAdd of -val: for unsigned `type`, negation wraps to
 * MAX+1-val, which is exactly the subtraction wanted; warning C4146
 * (unary minus on unsigned) is suppressed for that reason. The `mo`
 * argument is unused -- presumably because the Interlocked intrinsics
 * are already full barriers (confirm).
 */
179#define CX_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
180CX_GENERATE_ATOMICS(type, short_type, lg_size) \
183_atomicFetchAdd_##short_type(cx_atomic_##short_type *a, \
184 type val, AtmoicMemoryOrder mo) { \
185 return (type)CX_ATOMIC_IL_NAME(_InterlockedExchangeAdd, \
186 lg_size)(&a->repr, (CX_ATOMIC_IL_REPR(lg_size))val); \
190_atomicFetchSub_##short_type(cx_atomic_##short_type *a, \
191 type val, AtmoicMemoryOrder mo) { \
196 __pragma(warning(push)) \
197 __pragma(warning(disable: 4146)) /* unsigned negation is intended */ \
198 return _atomicFetchAdd_##short_type(a, -val, mo); \
199 __pragma(warning(pop)) \
202_atomicFetchAnd_##short_type(cx_atomic_##short_type *a, \
203 type val, AtmoicMemoryOrder mo) { \
204 return (type)CX_ATOMIC_IL_NAME(_InterlockedAnd, lg_size)( \
205 &a->repr, (CX_ATOMIC_IL_REPR(lg_size))val); \
208_atomicFetchOr_##short_type(cx_atomic_##short_type *a, \
209 type val, AtmoicMemoryOrder mo) { \
210 return (type)CX_ATOMIC_IL_NAME(_InterlockedOr, lg_size)( \
211 &a->repr, (CX_ATOMIC_IL_REPR(lg_size))val); \
214_atomicFetchXor_##short_type(cx_atomic_##short_type *a, \
215 type val, AtmoicMemoryOrder mo) { \
216 return (type)CX_ATOMIC_IL_NAME(_InterlockedXor, lg_size)( \
217 &a->repr, (CX_ATOMIC_IL_REPR(lg_size))val); \
/*
 * CX_EXTERN_INT_ATOMICS(type, short_type)
 * Declares prototypes matching the helpers emitted by
 * CX_GENERATE_INT_ATOMICS.
 *
 * FIX: the fetch-op declarators were missing the leading underscore
 * (atomicFetchAdd_##short_type, ...) and therefore did not match the
 * `_atomicFetch*_##short_type` definitions above or the atomicFetch*
 * dispatch macros; CX_EXTERN_ATOMICS already uses the `_atomic...`
 * spelling. Also stripped the stray line-number artifacts fused onto
 * each line.
 *
 * NOTE(review): as in CX_EXTERN_ATOMICS, the return-type/linkage
 * specifier line preceding each declarator appears to have been lost
 * from this copy; restore from upstream.
 */
#define CX_EXTERN_INT_ATOMICS(type, short_type) \
CX_EXTERN_ATOMICS(type, short_type) \
_atomicFetchAdd_##short_type(cx_atomic_##short_type *a, \
    type val, AtmoicMemoryOrder mo); \
_atomicFetchSub_##short_type(cx_atomic_##short_type *a, \
    type val, AtmoicMemoryOrder mo); \
_atomicFetchAnd_##short_type(cx_atomic_##short_type *a, \
    type val, AtmoicMemoryOrder mo); \
_atomicFetchOr_##short_type(cx_atomic_##short_type *a, \
    type val, AtmoicMemoryOrder mo); \
_atomicFetchXor_##short_type(cx_atomic_##short_type *a, \
    type val, AtmoicMemoryOrder mo);