Lines Matching refs:mo
36 static bool IsLoadOrder(morder mo) {
37 return mo == mo_relaxed || mo == mo_consume
38 || mo == mo_acquire || mo == mo_seq_cst;
41 static bool IsStoreOrder(morder mo) {
42 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
46 static bool IsReleaseOrder(morder mo) {
47 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
50 static bool IsAcquireOrder(morder mo) {
51 return mo == mo_consume || mo == mo_acquire
52 || mo == mo_acq_rel || mo == mo_seq_cst;
55 static bool IsAcqRelOrder(morder mo) {
56 return mo == mo_acq_rel || mo == mo_seq_cst;
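These matches appear to come from TSan's atomic interface (tsan_interface_atomic.cpp in compiler-rt). The five predicates above encode which C/C++11 memory orders are legal or meaningful for each operation class: loads may not release, stores may not acquire or consume, and seq_cst qualifies for everything. A minimal sketch of the same rules phrased against std::memory_order (not the TSan source, which uses its own morder enum):

    #include <atomic>

    // Same validity rules as IsLoadOrder/IsStoreOrder above, restated
    // against the standard enum.
    constexpr bool is_load_order(std::memory_order mo) {
      return mo == std::memory_order_relaxed || mo == std::memory_order_consume ||
             mo == std::memory_order_acquire || mo == std::memory_order_seq_cst;
    }
    constexpr bool is_store_order(std::memory_order mo) {
      return mo == std::memory_order_relaxed || mo == std::memory_order_release ||
             mo == std::memory_order_seq_cst;
    }
    static_assert(is_load_order(std::memory_order_seq_cst), "seq_cst loads are valid");
    static_assert(!is_store_order(std::memory_order_acquire), "stores cannot acquire");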
198 static memory_order to_mo(morder mo) {
199 switch (mo) {
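to_mo translates TSan's public morder values into the sanitizer-internal memory_order enum. Since both track the six C11 orders, the switch is a one-to-one mapping; a sketch of its shape, assuming the morder enumerators visible in the predicates above:

    #include <atomic>

    // Sketch of to_mo's shape; morder_sketch stands in for TSan's enum.
    enum morder_sketch { mo_relaxed, mo_consume, mo_acquire, mo_release,
                         mo_acq_rel, mo_seq_cst };

    std::memory_order to_mo_sketch(morder_sketch mo) {
      switch (mo) {
        case mo_relaxed: return std::memory_order_relaxed;
        case mo_consume: return std::memory_order_consume;
        case mo_acquire: return std::memory_order_acquire;
        case mo_release: return std::memory_order_release;
        case mo_acq_rel: return std::memory_order_acq_rel;
        case mo_seq_cst: return std::memory_order_seq_cst;
      }
      return std::memory_order_seq_cst;  // unreachable for valid input
    }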
212 static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
213 return atomic_load(to_atomic(a), to_mo(mo));
217 static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
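The separate a128 overload exists because 16-byte atomics are not lock-free on all targets, so the runtime falls back to reading under a global lock instead of calling the generic atomic_load. A rough sketch of that fallback; the struct and mutex names here are hypothetical, not TSan's:

    #include <mutex>

    struct a128_sketch { unsigned long long lo, hi; };
    static std::mutex g_mutex128;  // hypothetical stand-in for the runtime's lock

    a128_sketch load128_sketch(const volatile a128_sketch *a) {
      std::lock_guard<std::mutex> l(g_mutex128);
      // Safe to copy non-atomically: every 128-bit access takes the same lock.
      return *const_cast<const a128_sketch *>(a);
    }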
224 static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
225 DCHECK(IsLoadOrder(mo));
228 if (!IsAcquireOrder(mo)) {
231 return NoTsanAtomicLoad(a, mo);
235 T v = NoTsanAtomicLoad(a, mo);
243 v = NoTsanAtomicLoad(a, mo);
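AtomicLoad splits on the order class: non-acquire loads (the !IsAcquireOrder branch at line 228) skip the synchronization metadata entirely and go straight to NoTsanAtomicLoad, while acquire loads re-read under the sync object's reader lock so the clock acquired is consistent with the value observed. A minimal sketch of that shape, with a plain shared_mutex standing in for TSan's per-address sync object:

    #include <atomic>
    #include <shared_mutex>

    template <typename T>
    T atomic_load_sketch(const std::atomic<T> *a, std::memory_order mo,
                         std::shared_mutex *sync /*hypothetical per-address lock*/) {
      if (mo != std::memory_order_acquire && mo != std::memory_order_consume &&
          mo != std::memory_order_seq_cst)
        return a->load(mo);  // fast path: no happens-before bookkeeping
      std::shared_lock<std::shared_mutex> l(*sync);
      return a->load(mo);  // the real runtime also acquires the clock here
    }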
250 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
251 atomic_store(to_atomic(a), v, to_mo(mo));
255 static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
263 morder mo) {
264 DCHECK(IsStoreOrder(mo));
270 if (!IsReleaseOrder(mo)) {
271 NoTsanAtomicStore(a, v, mo);
279 NoTsanAtomicStore(a, v, mo);
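AtomicStore is the mirror image: non-release orders (line 270) store immediately, while release orders first publish the writing thread's clock into the sync object and only then perform the store, under the writer side of the same lock. A companion sketch to the load above, with the same hypothetical lock:

    #include <atomic>
    #include <shared_mutex>

    template <typename T>
    void atomic_store_sketch(std::atomic<T> *a, T v, std::memory_order mo,
                             std::shared_mutex *sync /*hypothetical metadata lock*/) {
      if (mo != std::memory_order_release && mo != std::memory_order_seq_cst) {
        a->store(v, mo);  // fast path: nothing to publish
        return;
      }
      std::unique_lock<std::shared_mutex> l(*sync);
      // The real runtime releases the thread's vector clock into the
      // sync object here, before the value becomes visible.
      a->store(v, mo);
    }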
285 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
287 if (LIKELY(mo == mo_relaxed))
292 RWLock lock(&s->mtx, IsReleaseOrder(mo));
293 if (IsAcqRelOrder(mo))
295 else if (IsReleaseOrder(mo))
297 else if (IsAcquireOrder(mo))
301 if (IsReleaseOrder(mo))
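The matched lines 287-301 show AtomicRMW's structure: a relaxed fast path, a reader/writer lock whose write mode is taken only for release-class orders, and a three-way dispatch on the order class. That dispatch, as a pure function (ClockOp stands in for TSan's AcquireImpl/ReleaseImpl/AcquireReleaseImpl):

    #include <atomic>

    enum class ClockOp { None, Acquire, Release, AcquireRelease };

    // Which vector-clock operation an RMW performs depends only on the
    // order class, per lines 293-297.
    ClockOp rmw_clock_op(std::memory_order mo) {
      if (mo == std::memory_order_acq_rel || mo == std::memory_order_seq_cst)
        return ClockOp::AcquireRelease;
      if (mo == std::memory_order_release)
        return ClockOp::Release;
      if (mo == std::memory_order_acquire || mo == std::memory_order_consume)
        return ClockOp::Acquire;
      return ClockOp::None;  // the mo_relaxed fast path at line 287
    }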
307 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
312 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
317 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
322 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
327 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
332 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
337 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
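Of the NoTsan fallbacks above, fetch_nand is the odd one out: there is no dedicated compiler primitive for it, so the usual emulation is a CAS loop, and TSan's implementation does something similar. A self-contained sketch of that emulation:

    #include <atomic>

    template <typename T>
    T fetch_nand_sketch(std::atomic<T> *a, T op) {
      T cur = a->load(std::memory_order_relaxed);
      while (!a->compare_exchange_weak(cur, static_cast<T>(~(cur & op)),
                                       std::memory_order_relaxed))
        ;  // compare_exchange_weak refreshes `cur` on failure
      return cur;  // previous value, matching fetch_* semantics
    }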
343 morder mo) {
344 return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
349 morder mo) {
350 return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
355 morder mo) {
356 return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
361 morder mo) {
362 return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
367 morder mo) {
368 return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
373 morder mo) {
374 return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
379 morder mo) {
380 return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
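All seven RMW wrappers above are one-liners into the single AtomicRMW template, parameterized by a small operation function (func_xchg, func_add, ...), so the locking and clock logic from lines 285-301 exists exactly once. A sketch of that pattern; the _sketch names are hypothetical and __atomic_fetch_add is the GCC/clang builtin:

    // One shared template carries the instrumentation; the operation is
    // a non-type template parameter, as in AtomicRMW<T, func_add> above.
    template <typename T, T (*F)(volatile T *, T)>
    T rmw_sketch(volatile T *a, T v) {
      // (Instrumentation elided; the real version takes thr/pc and locks
      // the sync object according to the memory order.)
      return F(a, v);
    }

    template <typename T>
    T func_add_sketch(volatile T *a, T v) {
      return __atomic_fetch_add(a, v, __ATOMIC_RELAXED);
    }

    int demo(volatile int *p) { return rmw_sketch<int, func_add_sketch<int>>(p, 1); }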
384 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
385 return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
390 morder mo, morder fmo) {
401 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
402 NoTsanAtomicCAS(a, &c, v, mo, fmo);
408 morder mo, morder fmo) {
415 if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
424 bool release = IsReleaseOrder(mo);
434 mo = fmo;
436 if (success && IsAcqRelOrder(mo))
438 else if (success && IsReleaseOrder(mo))
440 else if (IsAcquireOrder(mo))
450 volatile T *a, T c, T v, morder mo, morder fmo) {
451 AtomicCAS(thr, pc, a, &c, v, mo, fmo);
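The CAS path is the one place two orders are in play: per line 434 (`mo = fmo`), a failed compare-exchange switches to the failure order before the acquire/release dispatch at lines 436-440, so a failed acq_rel CAS performs only the acquire half. The caller-side view of that contract, in standard C++:

    #include <atomic>

    std::atomic<int> state{0};

    bool try_claim() {
      int expected = 0;
      // Distinct success/failure orders: on failure the runtime applies
      // the failure order (acquire here), never the release half.
      return state.compare_exchange_strong(expected, 1,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire);
    }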
456 static void NoTsanAtomicFence(morder mo) {
460 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
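Standalone fences get the same interposition treatment as the operations: under -fsanitize=thread, explicit fences compile into the two entry points matched at lines 837-840 below, which land in the implementations here. For example:

    #include <atomic>

    void publish_fence() {
      // Lowered to (roughly) __tsan_atomic_thread_fence(mo_release)
      // and __tsan_atomic_signal_fence(mo_seq_cst) under TSan.
      std::atomic_thread_fence(std::memory_order_release);
      std::atomic_signal_fence(std::memory_order_seq_cst);
    }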
471 static morder convert_morder(morder mo) {
485 return (morder)(mo & 0x7fff);
493 mo = convert_morder(mo); \
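Before ATOMIC_IMPL dispatches, it runs the order through convert_morder (line 493). The 0x7fff mask at line 485 drops high flag bits that some frontends OR into the order value, likely GCC's MEMMODEL_SYNC and lock-elision (HLE) hint bits, leaving only the base C11 order. A sketch:

    // Strip vendor flag bits, keep only the base C11 order.
    unsigned convert_morder_sketch(unsigned mo) {
      return mo & 0x7fff;  // same mask as line 485
    }
    // e.g. convert_morder_sketch(3 /*release*/ | (1u << 16) /*HLE hint*/) == 3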
498 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
499 ATOMIC_IMPL(Load, a, mo);
503 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
504 ATOMIC_IMPL(Load, a, mo);
508 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
509 ATOMIC_IMPL(Load, a, mo);
513 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
514 ATOMIC_IMPL(Load, a, mo);
519 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
520 ATOMIC_IMPL(Load, a, mo);
525 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
526 ATOMIC_IMPL(Store, a, v, mo);
530 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
531 ATOMIC_IMPL(Store, a, v, mo);
535 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
536 ATOMIC_IMPL(Store, a, v, mo);
540 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
541 ATOMIC_IMPL(Store, a, v, mo);
546 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
547 ATOMIC_IMPL(Store, a, v, mo);
552 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
553 ATOMIC_IMPL(Exchange, a, v, mo);
557 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
558 ATOMIC_IMPL(Exchange, a, v, mo);
562 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
563 ATOMIC_IMPL(Exchange, a, v, mo);
567 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
568 ATOMIC_IMPL(Exchange, a, v, mo);
573 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
574 ATOMIC_IMPL(Exchange, a, v, mo);
579 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
580 ATOMIC_IMPL(FetchAdd, a, v, mo);
584 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
585 ATOMIC_IMPL(FetchAdd, a, v, mo);
589 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
590 ATOMIC_IMPL(FetchAdd, a, v, mo);
594 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
595 ATOMIC_IMPL(FetchAdd, a, v, mo);
600 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
601 ATOMIC_IMPL(FetchAdd, a, v, mo);
606 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
607 ATOMIC_IMPL(FetchSub, a, v, mo);
611 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
612 ATOMIC_IMPL(FetchSub, a, v, mo);
616 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
617 ATOMIC_IMPL(FetchSub, a, v, mo);
621 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
622 ATOMIC_IMPL(FetchSub, a, v, mo);
627 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
628 ATOMIC_IMPL(FetchSub, a, v, mo);
633 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
634 ATOMIC_IMPL(FetchAnd, a, v, mo);
638 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
639 ATOMIC_IMPL(FetchAnd, a, v, mo);
643 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
644 ATOMIC_IMPL(FetchAnd, a, v, mo);
648 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
649 ATOMIC_IMPL(FetchAnd, a, v, mo);
654 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
655 ATOMIC_IMPL(FetchAnd, a, v, mo);
660 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
661 ATOMIC_IMPL(FetchOr, a, v, mo);
665 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
666 ATOMIC_IMPL(FetchOr, a, v, mo);
670 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
671 ATOMIC_IMPL(FetchOr, a, v, mo);
675 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
676 ATOMIC_IMPL(FetchOr, a, v, mo);
681 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
682 ATOMIC_IMPL(FetchOr, a, v, mo);
687 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
688 ATOMIC_IMPL(FetchXor, a, v, mo);
692 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
693 ATOMIC_IMPL(FetchXor, a, v, mo);
697 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
698 ATOMIC_IMPL(FetchXor, a, v, mo);
702 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
703 ATOMIC_IMPL(FetchXor, a, v, mo);
708 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
709 ATOMIC_IMPL(FetchXor, a, v, mo);
714 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
715 ATOMIC_IMPL(FetchNand, a, v, mo);
719 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
720 ATOMIC_IMPL(FetchNand, a, v, mo);
724 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
725 ATOMIC_IMPL(FetchNand, a, v, mo);
729 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
730 ATOMIC_IMPL(FetchNand, a, v, mo);
735 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
736 ATOMIC_IMPL(FetchNand, a, v, mo);
742 morder mo, morder fmo) {
743 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
748 morder mo, morder fmo) {
749 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
754 morder mo, morder fmo) {
755 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
760 morder mo, morder fmo) {
761 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
767 morder mo, morder fmo) {
768 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
774 morder mo, morder fmo) {
775 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
780 morder mo, morder fmo) {
781 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
786 morder mo, morder fmo) {
787 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
792 morder mo, morder fmo) {
793 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
799 morder mo, morder fmo) {
800 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
806 morder mo, morder fmo) {
807 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
812 morder mo, morder fmo) {
813 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
818 morder mo, morder fmo) {
819 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
824 morder mo, morder fmo) {
825 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
831 morder mo, morder fmo) {
832 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
837 void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
840 void __tsan_atomic_signal_fence(morder mo) {
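Putting it together: every __tsan_atomic* symbol above is an interposition point that clang emits calls to when compiling with -fsanitize=thread. A small end-to-end example whose two explicit atomics land in __tsan_atomic8_store and __tsan_atomic8_load, establishing the happens-before edge TSan needs to accept the plain accesses to payload:

    #include <atomic>
    #include <thread>

    std::atomic<bool> ready{false};
    int payload;

    int main() {
      std::thread t([] {
        payload = 42;                                  // plain write
        ready.store(true, std::memory_order_release);  // -> __tsan_atomic8_store
      });
      while (!ready.load(std::memory_order_acquire)) { // -> __tsan_atomic8_load
      }
      int v = payload;  // no race report: release/acquire orders the writes
      t.join();
      return v == 42 ? 0 : 1;
    }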