Line data Source code
1 : #ifndef Py_LIMITED_API
2 : #ifndef Py_ATOMIC_H
3 : #define Py_ATOMIC_H
4 : /* XXX: When compilers start offering a stdatomic.h with lock-free
5 : atomic_int and atomic_address types, include that here and rewrite
6 : the atomic operations in terms of it. */
7 :
8 : #include "dynamic_annotations.h"
9 :
10 : #ifdef __cplusplus
11 : extern "C" {
12 : #endif
13 :
/* This is modeled after the atomics interface from C1x (since
 * standardized as C11's <stdatomic.h>), according to the draft at
 * http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
 * Operations and types are named the same except with a _Py_ prefix
 * and have the same semantics.
 *
 * Beware, the implementations here are deep magic.
 */
22 :
/* Memory-ordering constraints for the atomic operations below,
 * mirroring C1x's memory_order enum (see n1425).  The enumerator
 * order follows the draft standard; do not reorder, since the
 * relative values are what callers pass around. */
typedef enum _Py_memory_order {
    _Py_memory_order_relaxed,   /* no ordering constraint */
    _Py_memory_order_acquire,   /* acquire semantics on loads */
    _Py_memory_order_release,   /* release semantics on stores */
    _Py_memory_order_acq_rel,   /* both acquire and release */
    _Py_memory_order_seq_cst    /* sequentially consistent (strongest) */
} _Py_memory_order;
30 :
/* A pointer-sized value intended for atomic access.  The leading
 * underscore marks the field as private: access it only through the
 * _Py_atomic_* macros defined below, never directly. */
typedef struct _Py_atomic_address {
    void *_value;
} _Py_atomic_address;
34 :
/* An int value intended for atomic access.  As with _Py_atomic_address,
 * the _value field is private to the _Py_atomic_* macros below. */
typedef struct _Py_atomic_int {
    int _value;
} _Py_atomic_int;
38 :
39 : /* Only support GCC (for expression statements) and x86 (for simple
40 : * atomic semantics) for now */
41 : #if defined(__GNUC__) && (defined(__i386__) || defined(__amd64))
42 :
43 : static __inline__ void
44 3032 : _Py_atomic_signal_fence(_Py_memory_order order)
45 : {
46 3032 : if (order != _Py_memory_order_relaxed)
47 3032 : __asm__ volatile("":::"memory");
48 3032 : }
49 :
50 : static __inline__ void
51 0 : _Py_atomic_thread_fence(_Py_memory_order order)
52 : {
53 0 : if (order != _Py_memory_order_relaxed)
54 0 : __asm__ volatile("mfence":::"memory");
55 0 : }
56 :
57 : /* Tell the race checker about this operation's effects. */
58 : static __inline__ void
59 2222894 : _Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
60 : {
61 : (void)address; /* shut up -Wunused-parameter */
62 2222894 : switch(order) {
63 : case _Py_memory_order_release:
64 : case _Py_memory_order_acq_rel:
65 : case _Py_memory_order_seq_cst:
66 : _Py_ANNOTATE_HAPPENS_BEFORE(address);
67 1 : break;
68 : case _Py_memory_order_relaxed:
69 : case _Py_memory_order_acquire:
70 2222893 : break;
71 : }
72 2222894 : switch(order) {
73 : case _Py_memory_order_acquire:
74 : case _Py_memory_order_acq_rel:
75 : case _Py_memory_order_seq_cst:
76 : _Py_ANNOTATE_HAPPENS_AFTER(address);
77 3031 : break;
78 : case _Py_memory_order_relaxed:
79 : case _Py_memory_order_release:
80 2219863 : break;
81 : }
82 2222894 : }
83 :
/* Atomically store NEW_VAL into ATOMIC_VAL->_value honoring memory
 * order ORDER.
 *
 * Each macro argument is parenthesized and evaluated exactly once (via
 * the local copies), so side-effecting arguments are safe.
 *
 * Relaxed/release stores are plain volatile writes (x86 stores already
 * have release semantics; a compiler fence suffices).  Stronger orders
 * use XCHG with a memory operand, which is implicitly LOCKed on x86 and
 * therefore a full barrier.  The memory operand is both read and
 * written by XCHG, hence the "+m" output constraint. */
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    __extension__ ({ \
        __typeof__(ATOMIC_VAL) atomic_val = (ATOMIC_VAL); \
        __typeof__(atomic_val->_value) new_val = (NEW_VAL);\
        volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = (ORDER); \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
            _Py_atomic_signal_fence(_Py_memory_order_release); \
            /* fallthrough */ \
        case _Py_memory_order_relaxed: \
            *volatile_data = new_val; \
            break; \
        \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            __asm__ volatile("xchg %0, %1" \
                         : "+r"(new_val), "+m"(atomic_val->_value) \
                         : \
                         : "memory"); \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_WRITES_END(); \
    })
113 :
/* Atomically load ATOMIC_VAL->_value honoring memory order ORDER; the
 * loaded value is the value of the whole expression.
 *
 * Each macro argument is parenthesized and evaluated exactly once (via
 * the local copies), so side-effecting arguments are safe. */
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    __extension__ ({  \
        __typeof__(ATOMIC_VAL) atomic_val = (ATOMIC_VAL); \
        __typeof__(atomic_val->_value) result; \
        volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
        _Py_memory_order order = (ORDER); \
        _Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
        \
        /* Perform the operation. */ \
        _Py_ANNOTATE_IGNORE_READS_BEGIN(); \
        switch(order) { \
        case _Py_memory_order_release: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are not releases by default, so need a */ \
            /* thread fence. */ \
            _Py_atomic_thread_fence(_Py_memory_order_release); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        result = *volatile_data; \
        switch(order) { \
        case _Py_memory_order_acquire: \
        case _Py_memory_order_acq_rel: \
        case _Py_memory_order_seq_cst: \
            /* Loads on x86 are automatically acquire operations so */ \
            /* can get by with just a compiler fence. */ \
            _Py_atomic_signal_fence(_Py_memory_order_acquire); \
            break; \
        default: \
            /* No fence */ \
            break; \
        } \
        _Py_ANNOTATE_IGNORE_READS_END(); \
        result; \
    })
152 :
153 : #else /* !gcc x86 */
154 : /* Fall back to other compilers and processors by assuming that simple
155 : volatile accesses are atomic. This is false, so people should port
156 : this. */
/* Fallback for non-GCC / non-x86: fences compile away and loads/stores
 * are plain (non-atomic!) accesses through the struct.  NEW_VAL is
 * parenthesized so that expression arguments expand safely. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    ((ATOMIC_VAL)->_value = (NEW_VAL))
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    ((ATOMIC_VAL)->_value)
163 :
164 : #endif /* !gcc x86 */
165 :
/* Standardized shortcuts. */

/* Sequentially-consistent store/load: the strongest ordering, matching
 * C1x's plain atomic_store/atomic_load defaults. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_seq_cst)

/* Python-local extensions */

/* Relaxed store/load: atomic access with no ordering guarantees. */
#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
    _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
    _Py_atomic_load_explicit(ATOMIC_VAL, _Py_memory_order_relaxed)
178 :
179 : #ifdef __cplusplus
180 : }
181 : #endif
182 :
183 : #endif /* Py_ATOMIC_H */
184 : #endif /* Py_LIMITED_API */
|