#ifndef V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_

inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // Full barrier, inner shareable domain.
}

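// Added note: "dmb ish" orders all prior loads and stores against all later
// ones, for every CPU in the inner shareable domain. A rough C++11 equivalent
// of MemoryBarrier() — a sketch for comparison only, the name
// MemoryBarrierSketch is hypothetical — would be:
//
//   #include <atomic>
//   inline void MemoryBarrierSketch() {
//     std::atomic_thread_fence(std::memory_order_seq_cst);
//   }
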
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the current value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"  // Bail out if it changed.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if the store failed.
    "1:                                    \n\t"
    : [prev] "=&r" (prev), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [old_value] "r" (old_value), [new_value] "r" (new_value)
    : "cc", "memory");

  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                    \n\t"
    "ldxr %w[result], %[ptr]               \n\t"  // Load the current value.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if the store failed.
    : [result] "=&r" (result), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [new_value] "r" (new_value)
    : "memory");

  return result;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                        \n\t"
    "ldxr %w[result], %[ptr]                   \n\t"  // Load the current value.
    "add %w[result], %w[result], %w[increment] \n\t"
    "stxr %w[temp], %w[result], %[ptr]         \n\t"  // Try to store the sum.
    "cbnz %w[temp], 0b                         \n\t"  // Retry if the store failed.
    : [result] "=&r" (result), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [increment] "r" (increment)
    : "memory");

  return result;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return result;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the current value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"  // Bail out if it changed.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if the store failed.
    "dmb ish                               \n\t"  // Acquire barrier on success.
    "1:                                    \n\t"
    : [prev] "=&r" (prev), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [old_value] "r" (old_value), [new_value] "r" (new_value)
    : "cc", "memory");

  return prev;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  MemoryBarrier();  // Release barrier before the exchange.

  __asm__ __volatile__ (
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the current value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"  // Bail out if it changed.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if the store failed.
    "1:                                    \n\t"
    : [prev] "=&r" (prev), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [old_value] "r" (old_value), [new_value] "r" (new_value)
    : "cc", "memory");

  return prev;
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                   \n\t"
    "ldxr %[prev], %[ptr]                 \n\t"  // Load the current value.
    "cmp %[prev], %[old_value]            \n\t"
    "bne 1f                               \n\t"  // Bail out if it changed.
    "stxr %w[temp], %[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                    \n\t"  // Retry if the store failed.
    "1:                                   \n\t"
    : [prev] "=&r" (prev), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [old_value] "r" (old_value), [new_value] "r" (new_value)
    : "cc", "memory");

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                   \n\t"
    "ldxr %[result], %[ptr]               \n\t"  // Load the current value.
    "stxr %w[temp], %[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                    \n\t"  // Retry if the store failed.
    : [result] "=&r" (result), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [new_value] "r" (new_value)
    : "memory");

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                     \n\t"
    "ldxr %[result], %[ptr]                 \n\t"  // Load the current value.
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], %[ptr]       \n\t"  // Try to store the sum.
    "cbnz %w[temp], 0b                      \n\t"  // Retry if the store failed.
    : [result] "=&r" (result), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [increment] "r" (increment)
    : "memory");

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return result;
}

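// Added note: rather than a bespoke asm sequence, the Barrier_ variant wraps
// the plain atomic add in two full barriers, giving the operation both
// acquire and release semantics at the cost of an extra dmb.
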
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (
    "0:                                   \n\t"
    "ldxr %[prev], %[ptr]                 \n\t"  // Load the current value.
    "cmp %[prev], %[old_value]            \n\t"
    "bne 1f                               \n\t"  // Bail out if it changed.
    "stxr %w[temp], %[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                    \n\t"  // Retry if the store failed.
    "dmb ish                              \n\t"  // Acquire barrier on success.
    "1:                                   \n\t"
    : [prev] "=&r" (prev), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [old_value] "r" (old_value), [new_value] "r" (new_value)
    : "cc", "memory");

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  MemoryBarrier();  // Release barrier before the exchange.

  __asm__ __volatile__ (
    "0:                                   \n\t"
    "ldxr %[prev], %[ptr]                 \n\t"  // Load the current value.
    "cmp %[prev], %[old_value]            \n\t"
    "bne 1f                               \n\t"  // Bail out if it changed.
    "stxr %w[temp], %[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                    \n\t"  // Retry if the store failed.
    "1:                                   \n\t"
    : [prev] "=&r" (prev), [temp] "=&r" (temp), [ptr] "+Q" (*ptr)
    : [old_value] "r" (old_value), [new_value] "r" (new_value)
    : "cc", "memory");

  return prev;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

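// Usage sketch (added, illustrative): the canonical pairing of Release_Store
// and Acquire_Load (the Atomic32 versions are below) for handing a payload
// between threads; all names here are hypothetical.
//
//   // producer thread                   // consumer thread
//   payload = 42;                        if (Acquire_Load(&flag) == 1) {
//   Release_Store(&flag, 1);               // guaranteed to see payload == 42
//                                        }
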
// 32-bit loads and stores.

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // V8_ATOMICOPS_INTERNALS_ARM64_GCC_H_