Lines Matching refs:spc_mutex
557 * On arrival here LWPs on a run queue are locked by spc_mutex which
565 KASSERT(lwp_locked(newl, spc->spc_mutex));
588 * LWPs can appear in the queue due to our hold on spc_mutex, and
590 * the release of spc_mutex becomes globally visible.
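The fragments at 588 and 590 come from a comment that appears to argue a flag can be cleared with a plain, non-atomic store: no new LWPs can be queued while spc_mutex is held, and the store is published no later than the release of spc_mutex. Below is a minimal user-space sketch of that ordering argument, using hypothetical names (run_queue, rq_want_resched) rather than the kernel's; it is an analogue of the pattern, not the kernel code.

#include <pthread.h>
#include <stdbool.h>

struct run_queue {
	pthread_mutex_t	rq_mutex;		/* stands in for spc_mutex */
	bool		rq_want_resched;	/* stands in for the flag being cleared */
	int		rq_count;		/* number of queued items */
};

/*
 * Clear the flag with a plain, non-atomic store.  The store happens
 * while rq_mutex is held, and the unlock has release semantics, so any
 * thread that later acquires rq_mutex is guaranteed to observe it.
 */
void
rq_clear_resched(struct run_queue *rq)
{

	pthread_mutex_lock(&rq->rq_mutex);
	if (rq->rq_count == 0)
		rq->rq_want_resched = false;
	pthread_mutex_unlock(&rq->rq_mutex);
}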
620 KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
668 lwp_setlock(l, spc->spc_mutex);
709 * Preemption related tasks. Must be done holding spc_mutex. Clear
711 * sched_resched_cpu() which also holds spc_mutex, and only ever
731 if (l->l_mutex == spc->spc_mutex) {
734 * to the run queue (it is now locked by spc_mutex).
739 * Otherwise, drop the spc_mutex, we are done with the
742 mutex_spin_exit(spc->spc_mutex);
867 mutex_spin_exit(spc->spc_mutex);
895 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
951 oldlock = lwp_setlock(l, l->l_cpu->ci_schedstate.spc_mutex);
1066 KASSERT(lwp_locked(l, spc->spc_mutex));
1095 KASSERT(lwp_locked(l, spc->spc_mutex));
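Lines 668, 951, 1066 and 1095 show the other half of the handoff: when an LWP moves onto a run queue its lock pointer is retargeted at spc_mutex with lwp_setlock(), which returns the previous lock (saved into "oldlock" at line 951), and callers later assert the invariant with lwp_locked(). A minimal user-space sketch of that pair follows, with illustrative names; note that the kernel's lwp_locked() also checks that the caller actually owns the lock, which the plain pointer comparison below omits.

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t	*o_mutex;	/* plays the role of l_mutex */
};

/*
 * Retarget the object's lock while the current lock is held, returning
 * the old lock so the caller can release it.
 */
pthread_mutex_t *
obj_setlock(struct obj *o, pthread_mutex_t *newlock)
{
	pthread_mutex_t *old = o->o_mutex;

	o->o_mutex = newlock;
	return old;
}

/* Is the object currently covered by this particular lock? */
bool
obj_locked(struct obj *o, pthread_mutex_t *mtx)
{

	return o->o_mutex == mtx;
}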