Lines matching the identifier `v` in compiler-rt's tsan_interface_atomic.cpp (the ThreadSanitizer runtime's atomic-operation interface), reconstructed into readable form below.

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand; emulate it with a CAS loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
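
A quick sanity check of the NAND emulation above (a standalone sketch, not part of the runtime; it assumes the template helpers above are in scope): a fetch-nand must return the prior value and leave ~(old & op) in memory.

#include <cassert>

int main() {
  volatile int x = 0b1100;
  int old = func_nand(&x, 0b1010);    // CAS loop installs ~(0b1100 & 0b1010)
  assert(old == 0b1100);              // returns the previous value
  assert(x == ~(0b1100 & 0b1010));    // memory now holds ~(old & op), i.e. -9
  return 0;
}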

// clang does not support 128-bit __sync atomics; emulate them under a
// spin lock. This assumes the atomic variables are not accessed from
// non-instrumented code.
static SpinMutex mutex128;

a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
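
The 128-bit helpers above are atomic only with respect to each other: every operation serializes on one global lock, which is why the surrounding code assumes no non-instrumented code touches these variables. A portable sketch of the same fallback pattern (illustrative names, std::mutex standing in for the runtime's spin lock):

#include <mutex>

using a128_t = __int128;    // assumes a compiler with __int128 support
static std::mutex g_mu128;  // one global lock serializes all 128-bit ops

static a128_t fetch_add_128(volatile a128_t *v, a128_t op) {
  std::lock_guard<std::mutex> lock(g_mu128);
  a128_t old = *v;  // the read-modify-write is atomic only w.r.t. this lock
  *v = old + op;
  return old;
}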

template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  // ... fast path: non-acquire loads just read the value ...
  T v = NoTsanAtomicLoad(a, mo);
  // ... for acquire loads, acquire the sync-object clock and re-read the
  // value under the sync mutex, so the value and the acquired clock form
  // a consistent snapshot ...
  v = NoTsanAtomicLoad(a, mo);
  // ...
  return v;
}
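
The double load exists because an acquire load must pair the observed value with the clock it acquires. At the user level this is the classic release/acquire handoff that these functions model (illustrative, plain C++):

#include <atomic>
#include <cassert>
#include <thread>

int data = 0;
std::atomic<int> flag{0};

int main() {
  std::thread producer([] {
    data = 42;                                 // plain write
    flag.store(1, std::memory_order_release);  // release store publishes it
  });
  while (flag.load(std::memory_order_acquire) == 0) {
  }                    // acquire load synchronizes with the release store
  assert(data == 42);  // guaranteed: no data race on `data`
  producer.join();
  return 0;
}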

template <typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  // ... non-release stores take the fast path ...
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  // ... release stores first release the thread's clock into the sync
  // object for this address, then perform the store ...
  NoTsanAtomicStore(a, v, mo);
  // ...
}

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  // ... relaxed RMWs take the fast path ...
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  // ... otherwise acquire/release the sync-object clock as `mo` requires
  // and run the primitive under the sync mutex ...
  v = F(a, v);
  // ...
  return v;
}
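
The primitive is passed as a non-type template parameter, so each wrapper below instantiates a specialized body with a direct call rather than an indirect one. A stripped-down sketch of the dispatch (no ThreadState or clock machinery; it reuses the func_* helpers above):

template <typename T, T (*F)(volatile T *, T)>
T rmw(volatile T *a, T v) {
  return F(a, v);  // F is resolved at compile time
}

int demo(volatile int *p) {
  rmw<int, func_xchg>(p, 1);        // instantiates the exchange body
  return rmw<int, func_add>(p, 2);  // instantiates the fetch-add body
}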

template <typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template <typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template <typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template <typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template <typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template <typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template <typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template <typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template <typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template <typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template <typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template <typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
                       morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template <typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template <typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
                         morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template <typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
                            morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}

template <typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // ... fast path when both orders are relaxed ...
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }
  // ... slow path: the same func_cas under the sync-object mutex, with
  // the thread clock released/acquired as `mo` (or `fmo` on failure)
  // requires; on failure the observed value is written back to *c ...
}

template <typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
                   volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
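
On failure, the pointer-form CAS writes the observed value back through `c`, which is exactly what a caller-side retry loop needs. A hypothetical example (not from the runtime; it assumes mo_relaxed from the runtime's morder enum):

static int atomic_increment_via_cas(volatile int *a) {
  int expected = *a;
  while (!NoTsanAtomicCAS(a, &expected, expected + 1,
                          mo_relaxed, mo_relaxed)) {
    // `expected` was refreshed with the value actually in *a; retry.
  }
  return expected;  // value before the successful increment
}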

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
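
These entry points are not normally called by hand: under -fsanitize=thread the compiler lowers atomic builtins to them. Roughly, x.fetch_add(1, std::memory_order_relaxed) on a 32-bit atomic becomes a call like the following (illustrative, as seen from a separate translation unit, with a32/morder/mo_relaxed as declared in the interface header):

extern "C" a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);

static a32 instrumented_fetch_add(volatile a32 *x) {
  return __tsan_atomic32_fetch_add(x, 1, mo_relaxed);
}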

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                             morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
                                       morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
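
Unlike the strong/weak variants, the *_compare_exchange_val entry points mirror GCC's __sync_val_compare_and_swap: they return the value previously in memory rather than a success flag. Illustrative use (types and memory orders as in the interface header):

static bool cas_succeeded(volatile a32 *a, a32 expected, a32 desired) {
  a32 old = __tsan_atomic32_compare_exchange_val(a, expected, desired,
                                                 mo_acq_rel, mo_acquire);
  return old == expected;  // success iff the prior value matched
}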