    Searched refs:spc_lwplock (Results 1 - 8 of 8) sorted by relevancy

  /src/sys/kern/
kern_idle.c
     55  KASSERT(lwp_locked(l, spc->spc_lwplock));
     94  KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
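Both kern_idle.c hits assert the same invariant from two angles: per the kern_synch.c comment further down, an idle LWP is always covered by its CPU's spc_lwplock, so its l_mutex must point at that lock. A minimal sketch of the check (idle_lock_invariant is a made-up helper name; the include list is indicative only):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lwp.h>
#include <sys/sched.h>

/*
 * Illustrative only: restates the assertions at kern_idle.c:55 and
 * kern_idle.c:94 above.  Must be called with l locked, since
 * lwp_locked() also checks ownership of the covering mutex.
 */
static void
idle_lock_invariant(struct lwp *l)
{
	struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;

	KASSERT(lwp_locked(l, spc->spc_lwplock));	/* the line 55 form */
	KASSERT(l->l_mutex == spc->spc_lwplock);	/* the line 94 form */
}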
kern_synch.c
    297  KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    322  KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    558  * is currently held. Idle LWPs are always locked by spc_lwplock,
    560  * in all cases newl is locked by spc_lwplock.
    572  lwp_setlock(newl, spc->spc_lwplock);
    665  KASSERT(lwp_locked(l, spc->spc_lwplock));
    733  * Drop spc_lwplock, if the current LWP has been moved
    736  mutex_spin_exit(spc->spc_lwplock);
    908  KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    917  KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
    [all...]
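The kern_synch.c comments at lines 558-560 state the rule applied at line 572: whatever lock the chosen LWP newl was found under, it ends up covered by spc_lwplock before it runs, and line 736 later releases spc_lwplock once it no longer covers the current LWP. A hedged sketch of the relocking half (relock_new_lwp is a hypothetical helper, not the real mi_switch(); includes indicative):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/sched.h>

/*
 * Illustrative only: the hand-off pattern behind kern_synch.c:572.
 * The caller holds whatever lock currently covers newl (spc_mutex if
 * it came off the run queue).  lwp_setlock() only repoints
 * newl->l_mutex at the new lock; it acquires and releases nothing.
 */
static void
relock_new_lwp(struct schedstate_percpu *spc, struct lwp *newl)
{
	lwp_setlock(newl, spc->spc_lwplock);
	/* From here on newl is covered by the CPU's general purpose lock. */
}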
kern_sleepq.c
    148  lwp_setlock(l, spc->spc_lwplock);
    159  lwp_setlock(l, spc->spc_lwplock);
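Both kern_sleepq.c hits sit in the sleep queue removal path: while an LWP waits on a sleep queue it is covered by that queue's lock, and on removal it is handed back to the general purpose spc_lwplock of its CPU. A sketch of that hand-back (hand_lwp_back_to_cpu is a made-up name; includes indicative):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lwp.h>
#include <sys/sched.h>

/*
 * Illustrative only: the transfer behind kern_sleepq.c:148/159.
 * The caller holds the sleep queue lock that currently covers l;
 * afterwards l is covered by its CPU's spc_lwplock instead (the
 * sleep queue lock itself is still held by the caller).
 */
static void
hand_lwp_back_to_cpu(struct lwp *l)
{
	struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;

	lwp_setlock(l, spc->spc_lwplock);
}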
kern_kthread.c
    116  lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
kern_lwp.c
    173  * Always covered by spc_lwplock, which protects LWPs not
    186  * be spc_lwplock for SOBJ_SLEEPQ_NULL (an "untracked" sleep).
    193  * the sleep queue since halted, then the lock is spc_lwplock.
    197  * sleepq -> turnstile -> spc_lwplock -> spc_mutex
    400  l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
    837  KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
   1295  lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_lwplock);
   1416  lwp_unlock_to(l, tspc->spc_lwplock);
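The kern_lwp.c hits include the locking notes (lines 173-197): an LWP that is not on a run queue or a sleep queue is covered by spc_lwplock, sleeping LWPs are covered by sleep queue or turnstile locks, and the locks order as sleepq -> turnstile -> spc_lwplock -> spc_mutex. The lwp_unlock_to() calls at lines 1295/1416 (and kern_kthread.c:116 above) move an LWP back under spc_lwplock while dropping whatever lock covered it. A hedged sketch of that call (park_under_spc_lwplock is a made-up helper; includes indicative):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/lwp.h>
#include <sys/sched.h>

/*
 * Illustrative only: the lwp_unlock_to() pattern from the hits above.
 * Caller holds l's current lock; lwp_unlock_to() first repoints
 * l->l_mutex at the new lock and then releases the old one, so l
 * returns unlocked and covered by its CPU's general purpose lock.
 */
static void
park_under_spc_lwplock(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
}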
kern_turnstile.c
    324  l->l_cpu->ci_schedstate.spc_lwplock);
kern_runq.c
    146  if (spc->spc_lwplock == NULL) {
    147  spc->spc_lwplock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
    151  lwp0.l_mutex = spc->spc_lwplock;
    380  * the running LWP lock (spc_lwplock), or a sleep queue lock. That
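The kern_runq.c hits at lines 146-151 show where the lock comes from: each CPU's spc_lwplock is allocated on first attach as a mutex at IPL_SCHED, and the boot CPU's lock is handed to lwp0, which is not on any run queue yet. A condensed sketch (init_spc_lwplock and the is_boot_cpu flag are illustrative, not the real kern_runq.c interface; includes indicative):

#include <sys/param.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/lwp.h>
#include <sys/sched.h>

/*
 * Illustrative only: condensed from the kern_runq.c hits above.
 * MUTEX_DEFAULT combined with IPL_SCHED gives a lock that the
 * scheduler can take from interrupt context at or below IPL_SCHED.
 */
static void
init_spc_lwplock(struct schedstate_percpu *spc, bool is_boot_cpu)
{
	if (spc->spc_lwplock == NULL)
		spc->spc_lwplock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);

	if (is_boot_cpu) {
		/* lwp0 is not on a run queue yet; cover it with this lock. */
		lwp0.l_mutex = spc->spc_lwplock;
	}
}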
  /src/sys/sys/
sched.h
    160  struct kmutex *spc_lwplock;  /* (: general purpose lock for LWPs */  [member in struct:schedstate_percpu]
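The single sys/sched.h hit is the definition site: spc_lwplock is a member of struct schedstate_percpu, the per-CPU scheduler state, alongside the spc_mutex named in the lock-order note above. A trimmed, illustrative view of that neighbourhood (only these two members are shown, the spc_mutex comment is paraphrased, and the real struct has many more fields):

/* Trimmed, illustrative view of struct schedstate_percpu (sys/sched.h). */
struct schedstate_percpu {
	struct kmutex	*spc_mutex;	/* lock on the run queue / runnable LWPs */
	struct kmutex	*spc_lwplock;	/* (: general purpose lock for LWPs */
	/* ...remaining per-CPU scheduler state omitted... */
};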
