Lines Matching refs:l_mutex
159 * general spin lock pointed to by lwp::l_mutex. The locks covering
191 * l_mutex references the sleep queue lock. If the LWP was
400 l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
837 KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
1045 lock = prev->l_mutex;
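
The fragments above (the comments at lines 159/191, the assignment at 400, and the handoffs at 837/1045) all describe the same indirection: an LWP owns no lock of its own; l_mutex points at whichever spin lock currently covers it, typically the CPU's spc_lwplock while the LWP is runnable or running, and a sleep queue's lock while it sleeps. Below is a minimal userland sketch of that idea, not kernel code: the names toy_lwp, toy_cpu and toy_sleepq are invented, and POSIX spinlocks plus C11 atomics stand in for kmutex_t and the kernel's atomic_store_release()/atomic_load_consume().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct toy_lwp {
	/* Points at whichever lock currently covers this LWP. */
	_Atomic(pthread_spinlock_t *) l_mutex;
	int l_stat;			/* protected by *l_mutex */
};

struct toy_cpu {
	pthread_spinlock_t spc_lwplock;	/* covers runnable/running LWPs */
};

struct toy_sleepq {
	pthread_spinlock_t sq_lock;	/* covers LWPs sleeping on this queue */
};

int
main(void)
{
	struct toy_cpu cpu;
	struct toy_sleepq sq;
	struct toy_lwp l;

	pthread_spin_init(&cpu.spc_lwplock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&sq.sq_lock, PTHREAD_PROCESS_PRIVATE);

	/* A fresh LWP starts out covered by its CPU's lwplock (cf. line 400). */
	atomic_init(&l.l_mutex, &cpu.spc_lwplock);
	l.l_stat = 1;			/* "runnable" */

	/*
	 * When the LWP goes to sleep, the code holding the current lock
	 * re-points l_mutex at the sleep queue lock (cf. line 191).  In the
	 * kernel the destination lock is held at this point as well; the
	 * sketch stays single-threaded and omits that.
	 */
	pthread_spin_lock(&cpu.spc_lwplock);
	l.l_stat = 2;			/* "sleeping" */
	atomic_store_explicit(&l.l_mutex, &sq.sq_lock, memory_order_release);
	pthread_spin_unlock(&cpu.spc_lwplock);

	printf("l_mutex now names the sleep queue lock: %d\n",
	    atomic_load_explicit(&l.l_mutex, memory_order_acquire) ==
	    &sq.sq_lock);
	return 0;
}
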
1545 kmutex_t *cur = l->l_mutex;
1556 kmutex_t *oldmtx = l->l_mutex;
1560 atomic_store_release(&l->l_mutex, mtx);
1575 old = l->l_mutex;
1576 atomic_store_release(&l->l_mutex, mtx);
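
Lines 1545-1576 are the read and handover side of that indirection: lwp_locked()-style checks read the pointer, while lwp_setlock()/lwp_unlock_to() publish a new lock pointer with a release store while the old lock is still held, and only then return or drop the old lock. A hedged sketch of the same shape, again with invented toy types and POSIX spinlocks rather than the kernel's kmutex_t:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

struct toy_lwp {
	_Atomic(pthread_spinlock_t *) l_mutex;
};

/* Like lwp_setlock(): caller holds the current lock; publish the new one
 * and hand the old (still held) lock back to the caller. */
static pthread_spinlock_t *
toy_setlock(struct toy_lwp *l, pthread_spinlock_t *new_lock)
{
	pthread_spinlock_t *old =
	    atomic_load_explicit(&l->l_mutex, memory_order_relaxed);

	/* Release ordering: updates made under the old lock are visible to
	 * anyone who later observes the new pointer. */
	atomic_store_explicit(&l->l_mutex, new_lock, memory_order_release);
	return old;
}

/* Like lwp_unlock_to(): publish the new lock, then drop the old one. */
static void
toy_unlock_to(struct toy_lwp *l, pthread_spinlock_t *new_lock)
{
	pthread_spin_unlock(toy_setlock(l, new_lock));
}

int
main(void)
{
	pthread_spinlock_t a, b;
	struct toy_lwp l;

	pthread_spin_init(&a, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&l.l_mutex, &a);

	pthread_spin_lock(&a);		/* hold the LWP's current lock */
	toy_unlock_to(&l, &b);		/* now covered by b; a is released */
	assert(atomic_load(&l.l_mutex) == &b);
	return 0;
}
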
1586 if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))
1588 if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
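
Lines 1586-1588 show lwp_trylock()'s variant of the race avoidance: try-enter the lock the pointer currently names, then re-check the pointer; a relaxed re-check suffices there because the try-enter has already posted the needed barrier. A sketch of the same loop under the same toy-type assumptions (memory_order_acquire standing in for the kernel's load-consume):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct toy_lwp {
	_Atomic(pthread_spinlock_t *) l_mutex;
};

static bool
toy_lwp_trylock(struct toy_lwp *l)
{
	pthread_spinlock_t *old;

	for (;;) {
		old = atomic_load_explicit(&l->l_mutex, memory_order_acquire);
		if (pthread_spin_trylock(old) != 0)
			return false;		/* lock busy: give up */
		/* Did the pointer change while we were acquiring? */
		if (atomic_load_explicit(&l->l_mutex,
		    memory_order_relaxed) == old)
			return true;		/* still the covering lock */
		pthread_spin_unlock(old);	/* stale lock: retry */
	}
}

int
main(void)
{
	pthread_spinlock_t a;
	struct toy_lwp l;

	pthread_spin_init(&a, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&l.l_mutex, &a);
	return toy_lwp_trylock(&l) ? 0 : 1;
}
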
1598 KASSERT(mutex_owned(l->l_mutex));
1608 kmutex_t *old = atomic_load_consume(&l->l_mutex);
1612 * Re-test l->l_mutex. If it has changed, we need to try again.
1615 while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
1617 old = atomic_load_consume(&l->l_mutex);
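
Lines 1608-1617 are the blocking counterpart, lwp_lock(): load the published pointer, spin on that lock, and re-test after acquisition; if another CPU re-pointed l_mutex while we were spinning, drop the now-stale lock and chase the new pointer. The comment at 1612 notes why the re-test can be relaxed: the spin-enter has already posted a read barrier. The same loop as a hedged userland sketch (toy types; acquire loads model the kernel's load-consume):

#include <pthread.h>
#include <stdatomic.h>

struct toy_lwp {
	_Atomic(pthread_spinlock_t *) l_mutex;
};

static void
toy_lwp_lock(struct toy_lwp *l)
{
	pthread_spinlock_t *old =
	    atomic_load_explicit(&l->l_mutex, memory_order_acquire);

	pthread_spin_lock(old);
	/*
	 * Re-test the pointer: it may have been re-pointed between our load
	 * and the moment the lock was granted.  If so, release the stale
	 * lock and start over on the new one.
	 */
	while (atomic_load_explicit(&l->l_mutex, memory_order_relaxed)
	    != old) {
		pthread_spin_unlock(old);
		old = atomic_load_explicit(&l->l_mutex, memory_order_acquire);
		pthread_spin_lock(old);
	}
}

int
main(void)
{
	pthread_spinlock_t a;
	struct toy_lwp l;

	pthread_spin_init(&a, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&l.l_mutex, &a);
	toy_lwp_lock(&l);
	pthread_spin_unlock(atomic_load(&l.l_mutex));
	return 0;
}
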
1629 mutex_spin_exit(l->l_mutex);
1636 KASSERT(mutex_owned(l->l_mutex));
1648 KASSERT(mutex_owned(l->l_mutex));
1659 KASSERT(mutex_owned(l->l_mutex));
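
The remaining hits (1598, 1629-1659) are the consumer side of the contract: helpers such as lwp_unsleep() and the priority routines simply assert mutex_owned(l->l_mutex) and unlock with mutex_spin_exit(l->l_mutex); whichever lock the pointer names at that moment is, by definition, the LWP lock. A small usage sketch under the same toy assumptions (pthread spinlocks have no ownership query, so the KASSERT becomes a comment):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

struct toy_lwp {
	_Atomic(pthread_spinlock_t *) l_mutex;
	int l_priority;			/* protected by *l_mutex */
};

/* Helper in the style of lwp_changepri(): it neither knows nor cares which
 * lock l_mutex names, only that the caller already holds it. */
static void
toy_changepri(struct toy_lwp *l, int pri)
{
	/* Kernel equivalent: KASSERT(mutex_owned(l->l_mutex)); */
	l->l_priority = pri;
}

int
main(void)
{
	pthread_spinlock_t a;
	struct toy_lwp l;

	pthread_spin_init(&a, PTHREAD_PROCESS_PRIVATE);
	atomic_init(&l.l_mutex, &a);
	l.l_priority = 0;

	/* lwp_lock(l); ... lwp_unlock(l); in kernel terms. */
	pthread_spin_lock(atomic_load(&l.l_mutex));
	toy_changepri(&l, 42);
	pthread_spin_unlock(atomic_load(&l.l_mutex));
	assert(l.l_priority == 42);
	return 0;
}
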