v8 3.14.5 (node 0.10.28)
V8 is Google's open source JavaScript engine
atomicops.h
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The routines exported by this module are subtle. If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain. If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative. You should assume only properties explicitly guaranteed by the
// specifications in this file. You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break. If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines. The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
//

#ifndef V8_ATOMICOPS_H_
#define V8_ATOMICOPS_H_

#include "../include/v8.h"
#include "globals.h"

namespace v8 {
namespace internal {

typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
#if defined(__OpenBSD__) && defined(__i386__)
typedef Atomic32 AtomicWord;
#else
typedef intptr_t AtomicWord;
#endif

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions. "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);

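// Illustrative usage sketch (the Example_* helpers below are not part of
// this header's interface; they assume only the Atomic32 routines declared
// above). They show the intended access pattern: an Atomic32 is never read
// or written with plain assignment; every access goes through a Load, Store
// or CompareAndSwap routine. The retry loop relies on the CompareAndSwap
// contract documented above: the swap took effect only if the returned value
// equals the old_value that was passed in.
inline void Example_ResetCounter(volatile Atomic32* counter) {
  NoBarrier_Store(counter, 0);  // rather than "*counter = 0"
}

// Increment *counter, but never past limit. NoBarrier_AtomicIncrement cannot
// express the bound, so a CompareAndSwap retry loop is used instead.
inline Atomic32 Example_BoundedIncrement(volatile Atomic32* counter,
                                         Atomic32 limit) {
  while (true) {
    Atomic32 old_value = NoBarrier_Load(counter);
    if (old_value >= limit) return old_value;  // already saturated
    Atomic32 new_value = old_value + 1;
    if (NoBarrier_CompareAndSwap(counter, old_value, new_value) == old_value) {
      return new_value;  // our swap won the race
    }
    // Another thread changed *counter first; reload and retry.
  }
}
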
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // V8_HOST_ARCH_64_BIT
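
// Illustrative sketch (ExampleSpinLock is not part of this header): a minimal
// spinlock of the kind the "Acquire"/"Release" comment above has in mind,
// built only from the Atomic32 routines declared in this file. Taking the
// lock uses Acquire_CompareAndSwap, so no later access to the protected data
// can be reordered ahead of acquiring the lock; releasing it uses
// Release_Store, so no earlier access can be reordered after the store that
// frees the lock.
class ExampleSpinLock {
 public:
  ExampleSpinLock() : state_(0) {}

  void Lock() {
    // Spin until the 0 -> 1 transition succeeds. The returned value is the
    // previous contents of state_; 0 means this thread acquired the lock.
    while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Busy-wait; a production lock would yield or back off here.
    }
  }

  void Unlock() {
    // Publish all writes made in the critical section before the lock is
    // observed as free again.
    Release_Store(&state_, 0);
  }

 private:
  volatile Atomic32 state_;
};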

} }  // namespace v8::internal

// Include our platform specific implementation.
#if defined(_MSC_VER) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
    (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // V8_ATOMICOPS_H_