Lines matching refs: mo
36 static bool IsLoadOrder(morder mo) {
37 return mo == mo_relaxed || mo == mo_consume
38 || mo == mo_acquire || mo == mo_seq_cst;
41 static bool IsStoreOrder(morder mo) {
42 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
46 static bool IsReleaseOrder(morder mo) {
47 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
50 static bool IsAcquireOrder(morder mo) {
51 return mo == mo_consume || mo == mo_acquire
52 || mo == mo_acq_rel || mo == mo_seq_cst;
55 static bool IsAcqRelOrder(morder mo) {
56 return mo == mo_acq_rel || mo == mo_seq_cst;
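These predicates encode which C/C++ memory orders are valid for each operation class and which ones actually trigger acquire/release synchronization. A few hypothetical sanity checks (not in the source file) make the tables concrete:

    // Hypothetical checks, assuming the morder constants mirror __ATOMIC_*.
    DCHECK(IsLoadOrder(mo_acquire));     // acquire is a valid load order
    DCHECK(!IsLoadOrder(mo_release));    // release makes no sense for a pure load
    DCHECK(IsStoreOrder(mo_release));    // release is a valid store order
    DCHECK(!IsStoreOrder(mo_acquire));   // acquire makes no sense for a pure store
    DCHECK(IsAcquireOrder(mo_acq_rel));  // acq_rel implies acquire...
    DCHECK(IsReleaseOrder(mo_acq_rel));  // ...and release
    DCHECK(!IsAcquireOrder(mo_relaxed)); // relaxed is valid but synchronizes nothing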
198 static memory_order to_mo(morder mo) {
199 switch (mo) {
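The switch body is elided by the match filter; it presumably maps each tsan morder constant to the corresponding sanitizer_common memory_order, along these lines:

    // Plausible reconstruction of the elided switch (sketch, not verbatim).
    switch (mo) {
      case mo_relaxed: return memory_order_relaxed;
      case mo_consume: return memory_order_consume;
      case mo_acquire: return memory_order_acquire;
      case mo_release: return memory_order_release;
      case mo_acq_rel: return memory_order_acq_rel;
      case mo_seq_cst: return memory_order_seq_cst;
    }
    CHECK(0);
    return memory_order_seq_cst;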
212 static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
213 return atomic_load(to_atomic(a), to_mo(mo));
217 static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
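The 128-bit overload has no further line matching mo in its body; on targets without native 16-byte atomics the usual trick is to guard a plain access with a global spin lock, roughly:

    // Sketch, assuming a file-scope SpinMutex (named mutex128 here) guards
    // 128-bit accesses on targets without native 16-byte atomic loads.
    static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
      SpinMutexLock lock(&mutex128);
      return *a;
    }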
224 static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
225 DCHECK(IsLoadOrder(mo));
228 if (!IsAcquireOrder(mo)) {
231 return NoTsanAtomicLoad(a, mo);
235 T v = NoTsanAtomicLoad(a, mo);
242 v = NoTsanAtomicLoad(a, mo);
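Reading between the matched lines of AtomicLoad: non-acquire loads take a fast path that only records the memory access (lines 228-231), while acquire loads read the value, acquire the sync object's clock, and re-read under the sync mutex so the value and the acquired clock form a consistent snapshot (lines 235-242). A consolidated sketch of that shape:

    // Sketch of the acquire path; SyncVar lookup and locking details assumed.
    if (!IsAcquireOrder(mo))
      return NoTsanAtomicLoad(a, mo);         // fast path: no synchronization
    SyncVar *s = LookupSyncIfExists((uptr)a); // hypothetical helper name
    T v = NoTsanAtomicLoad(a, mo);
    if (s) {
      ReadLock lock(&s->mtx);
      thr->clock.Acquire(s->clock);           // happens-before edge from the writer
      v = NoTsanAtomicLoad(a, mo);            // re-read: value must match the clock
    }
    return v;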
249 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
250 atomic_store(to_atomic(a), v, to_mo(mo));
254 static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
262 morder mo) {
263 DCHECK(IsStoreOrder(mo));
269 if (!IsReleaseOrder(mo)) {
270 NoTsanAtomicStore(a, v, mo);
280 NoTsanAtomicStore(a, v, mo);
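The store side mirrors this: non-release stores (line 270) just perform the plain atomic store, while release stores presumably lock the sync object, release the thread's clock into it, and only then store (line 280):

    // Sketch of the release path; sync-object plumbing assumed.
    if (!IsReleaseOrder(mo)) {
      NoTsanAtomicStore(a, v, mo);            // fast path
      return;
    }
    SyncVar *s = GetOrCreateSync((uptr)a);    // hypothetical helper name
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);       // publish this thread's clock
    NoTsanAtomicStore(a, v, mo);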
284 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
286 if (LIKELY(mo == mo_relaxed))
293 if (IsAcqRelOrder(mo))
295 else if (IsReleaseOrder(mo))
297 else if (IsAcquireOrder(mo))
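Lines 293-297 show the order-class dispatch for read-modify-write operations; the elided branch bodies presumably apply the matching clock operation on the location's sync object:

    // Sketch of the dispatch; clock method names assumed.
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);   // acq_rel/seq_cst RMW: both directions
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);          // release RMW: publish only
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);           // acquire/consume RMW: observe only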
303 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
308 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
313 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
318 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
323 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
328 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
333 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
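Each NoTsanAtomicFetch* body is elided here; most map directly onto a __atomic/__sync builtin. Nand is the odd one out, since not every compiler provides a fetch-nand builtin, so a portable fallback is a compare-and-swap loop:

    // Sketch of a CAS-loop fetch_nand fallback (assumed; clang historically
    // lacked __sync_fetch_and_nand).
    template <typename T>
    static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
      T cmp = *a;
      for (;;) {
        T newv = ~(cmp & v);
        T cur = __sync_val_compare_and_swap(a, cmp, newv);
        if (cur == cmp)
          return cmp;   // old value, as fetch_* requires
        cmp = cur;
      }
    }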
339 morder mo) {
340 return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
345 morder mo) {
346 return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
351 morder mo) {
352 return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
357 morder mo) {
358 return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
363 morder mo) {
364 return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
369 morder mo) {
370 return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
375 morder mo) {
376 return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
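Every AtomicExchange/AtomicFetch* wrapper above forwards to AtomicRMW with a functor template argument, so the synchronization logic lives in one place and only the arithmetic varies. The func_* arguments presumably have this shape:

    // Assumed shape of the func_* template arguments to AtomicRMW.
    template <typename T>
    static T func_add(volatile T *v, T op) {
      return __sync_fetch_and_add(v, op);
    }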
380 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
381 return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
386 morder mo, morder fmo) {
397 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
398 NoTsanAtomicCAS(a, &c, v, mo, fmo);
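The by-value overload forwards to the pointer overload and then, on the elided next line, presumably returns c, which was updated in place and now holds the old value, matching the __sync_val_compare_and_swap convention:

    // Likely completion of the elided line following 398:
    return c;   // equals the previous value of *a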
404 morder mo, morder fmo) {
411 if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
420 bool release = IsReleaseOrder(mo);
428 mo = fmo;
434 if (success && IsAcqRelOrder(mo))
436 else if (success && IsReleaseOrder(mo))
438 else if (IsAcquireOrder(mo))
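The matched lines expose the interesting part of compare-exchange: the success order mo governs synchronization only when the CAS succeeds, and on failure the code switches to the failure order (line 428: mo = fmo) before the dispatch at lines 434-438. In sketch form:

    // Sketch of the success/failure order handling; details assumed.
    bool success = PerformCas(a, c, v);       // hypothetical helper name
    if (!success)
      mo = fmo;                               // a failed CAS is just a load: use fmo
    if (success && IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (success && IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);           // acquire applies even on failure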
445 volatile T *a, T c, T v, morder mo, morder fmo) {
446 AtomicCAS(thr, pc, a, &c, v, mo, fmo);
451 static void NoTsanAtomicFence(morder mo) {
455 static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
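TSan does not model standalone fences precisely; the usual arrangement is for NoTsanAtomicFence to issue a real hardware fence and for AtomicFence to fall back to it:

    // Sketch (assumed): a real fence is still emitted so the program keeps
    // its hardware ordering even though the race detector ignores the fence.
    static void NoTsanAtomicFence(morder mo) { __sync_synchronize(); }
    static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
      NoTsanAtomicFence(mo);   // the detector itself does not model the fence
    }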
466 static morder convert_morder(morder mo) {
480 return (morder)(mo & 0x7fff);
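The 0x7fff mask at line 480 exists because compilers can OR extra flag bits into the memory-order argument; masking them off leaves the plain C++11 order:

    // Assumed meaning of the mask: strip additional flag bits such as
    // MEMMODEL_SYNC (1 << 15) and the x86 HLE hints
    // __ATOMIC_HLE_ACQUIRE (1 << 16) / __ATOMIC_HLE_RELEASE (1 << 17),
    // leaving only the base memory order.
    return (morder)(mo & 0x7fff);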
488 mo = convert_morder(mo); \
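Line 488 is the one matched line of the ATOMIC_IMPL macro; the rest presumably fetches the current thread state and routes to either the instrumented Atomic* function or the NoTsanAtomic* fallback when synchronization is being ignored:

    // Plausible shape of the macro (sketch, not verbatim).
    #define ATOMIC_IMPL(func, ...)                            \
      ThreadState *const thr = cur_thread();                  \
      mo = convert_morder(mo);                                \
      if (UNLIKELY(thr->ignore_sync))                         \
        return NoTsanAtomic##func(__VA_ARGS__);               \
      return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);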
493 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
494 ATOMIC_IMPL(Load, a, mo);
498 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
499 ATOMIC_IMPL(Load, a, mo);
503 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
504 ATOMIC_IMPL(Load, a, mo);
508 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
509 ATOMIC_IMPL(Load, a, mo);
514 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
515 ATOMIC_IMPL(Load, a, mo);
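These five entry points are what -fsanitize=thread instrumentation calls in place of the compiler's own atomic lowering. Conceptually (illustrative only, and assuming the morder values coincide with the __ATOMIC_*/std::memory_order numbering):

    std::atomic<int> flag;
    int v = flag.load(std::memory_order_acquire);
    // is lowered by the instrumented compiler to roughly:
    int v2 = __tsan_atomic32_load(
        reinterpret_cast<volatile a32 *>(&flag),
        static_cast<morder>(std::memory_order_acquire));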
520 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
521 ATOMIC_IMPL(Store, a, v, mo);
525 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
526 ATOMIC_IMPL(Store, a, v, mo);
530 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
531 ATOMIC_IMPL(Store, a, v, mo);
535 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
536 ATOMIC_IMPL(Store, a, v, mo);
541 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
542 ATOMIC_IMPL(Store, a, v, mo);
547 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
548 ATOMIC_IMPL(Exchange, a, v, mo);
552 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
553 ATOMIC_IMPL(Exchange, a, v, mo);
557 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
558 ATOMIC_IMPL(Exchange, a, v, mo);
562 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
563 ATOMIC_IMPL(Exchange, a, v, mo);
568 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
569 ATOMIC_IMPL(Exchange, a, v, mo);
574 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
575 ATOMIC_IMPL(FetchAdd, a, v, mo);
579 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
580 ATOMIC_IMPL(FetchAdd, a, v, mo);
584 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
585 ATOMIC_IMPL(FetchAdd, a, v, mo);
589 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
590 ATOMIC_IMPL(FetchAdd, a, v, mo);
595 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
596 ATOMIC_IMPL(FetchAdd, a, v, mo);
601 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
602 ATOMIC_IMPL(FetchSub, a, v, mo);
606 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
607 ATOMIC_IMPL(FetchSub, a, v, mo);
611 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
612 ATOMIC_IMPL(FetchSub, a, v, mo);
616 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
617 ATOMIC_IMPL(FetchSub, a, v, mo);
622 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
623 ATOMIC_IMPL(FetchSub, a, v, mo);
628 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
629 ATOMIC_IMPL(FetchAnd, a, v, mo);
633 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
634 ATOMIC_IMPL(FetchAnd, a, v, mo);
638 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
639 ATOMIC_IMPL(FetchAnd, a, v, mo);
643 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
644 ATOMIC_IMPL(FetchAnd, a, v, mo);
649 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
650 ATOMIC_IMPL(FetchAnd, a, v, mo);
655 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
656 ATOMIC_IMPL(FetchOr, a, v, mo);
660 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
661 ATOMIC_IMPL(FetchOr, a, v, mo);
665 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
666 ATOMIC_IMPL(FetchOr, a, v, mo);
670 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
671 ATOMIC_IMPL(FetchOr, a, v, mo);
676 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
677 ATOMIC_IMPL(FetchOr, a, v, mo);
682 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
683 ATOMIC_IMPL(FetchXor, a, v, mo);
687 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
688 ATOMIC_IMPL(FetchXor, a, v, mo);
692 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
693 ATOMIC_IMPL(FetchXor, a, v, mo);
697 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
698 ATOMIC_IMPL(FetchXor, a, v, mo);
703 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
704 ATOMIC_IMPL(FetchXor, a, v, mo);
709 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
710 ATOMIC_IMPL(FetchNand, a, v, mo);
714 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
715 ATOMIC_IMPL(FetchNand, a, v, mo);
719 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
720 ATOMIC_IMPL(FetchNand, a, v, mo);
724 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
725 ATOMIC_IMPL(FetchNand, a, v, mo);
730 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
731 ATOMIC_IMPL(FetchNand, a, v, mo);
737 morder mo, morder fmo) {
738 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
743 morder mo, morder fmo) {
744 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
749 morder mo, morder fmo) {
750 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
755 morder mo, morder fmo) {
756 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
762 morder mo, morder fmo) {
763 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
769 morder mo, morder fmo) {
770 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
775 morder mo, morder fmo) {
776 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
781 morder mo, morder fmo) {
782 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
787 morder mo, morder fmo) {
788 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
794 morder mo, morder fmo) {
795 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
801 morder mo, morder fmo) {
802 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
807 morder mo, morder fmo) {
808 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
813 morder mo, morder fmo) {
814 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
819 morder mo, morder fmo) {
820 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
826 morder mo, morder fmo) {
827 ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
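The compare-exchange surface comes in three flavors per width: the _strong and _weak variants return a success flag and update *c with the observed value, while _val follows the __sync_val_compare_and_swap convention and returns the old value directly. Illustrative use of the two conventions (signatures assumed from the patterns above):

    volatile a32 x = 0;
    a32 expected = 0;
    if (__tsan_atomic32_compare_exchange_strong(&x, &expected, 1,
                                                mo_acq_rel, mo_acquire)) {
      // success: x was 0, is now 1; on failure, expected holds the observed value
    }
    a32 old = __tsan_atomic32_compare_exchange_val(&x, 1, 2,
                                                   mo_acq_rel, mo_acquire);
    // old == 1 iff the swap happened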
832 void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
835 void __tsan_atomic_signal_fence(morder mo) {
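A signal fence only has to order the current thread against a signal handler running on that same thread, so no inter-thread synchronization (and no hardware fence) is required; the elided body is plausibly empty or a pure compiler barrier.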