v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
atomicops_internals_tsan.h
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

extern "C" {
typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
}  // extern "C"

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

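The inline wrappers that follow express V8's atomicops primitives (the NoBarrier_/Acquire_/Release_ variants) directly in terms of the __tsan_* builtins declared above, so ThreadSanitizer observes every access together with its intended ordering. As a rough analogy only, not part of this header, the Acquire_CompareAndSwap wrapper behaves like the C++11 std::atomic sketch below; the function and variable names in the sketch are illustrative, not V8 API.

// Illustrative sketch only: approximate std::atomic equivalent of the
// Acquire_CompareAndSwap wrapper defined below. Names are hypothetical.
#include <atomic>
#include <cstdint>

inline int32_t AcquireCompareAndSwapSketch(std::atomic<int32_t>* ptr,
                                           int32_t old_value,
                                           int32_t new_value) {
  int32_t expected = old_value;  // updated in place if the compare fails
  ptr->compare_exchange_strong(expected, new_value,
                               std::memory_order_acquire,
                               std::memory_order_acquire);
  return expected;  // mirrors the "return cmp" convention used below
}
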
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_TSAN_H_
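Client code does not include this header directly; per the comment at the top of the file it goes through the atomicops.h front-end header, which selects this implementation when building under ThreadSanitizer. A minimal usage sketch under that assumption follows; the flag g_ready and the functions Publish() and IsReady() are illustrative names, not part of V8.

// Hedged usage sketch: publishing a flag with release/acquire semantics via
// the atomicops API. Assumes atomicops.h is on the include path.
#include "atomicops.h"

namespace {

v8::internal::Atomic32 g_ready = 0;

void Publish() {
  // Writes made before this Release_Store are visible to any thread that
  // later observes g_ready == 1 through Acquire_Load.
  v8::internal::Release_Store(&g_ready, 1);
}

bool IsReady() {
  return v8::internal::Acquire_Load(&g_ready) != 0;
}

}  // namespace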