Home | Sort by: relevance | last modified time | path
    Searched refs:spc_mutex (Results 1 - 8 of 8) sorted by relevance

  /src/sys/sys/
lwp.h 74 * s: spc_mutex, which may or may not be referenced by l_mutex
411 mutex_spin_enter(ci->ci_schedstate.spc_mutex);
417 mutex_spin_exit(ci->ci_schedstate.spc_mutex);
428 mutex_spin_enter(spc1->spc_mutex);
429 mutex_spin_enter(spc2->spc_mutex);
431 mutex_spin_enter(spc2->spc_mutex);
432 mutex_spin_enter(spc1->spc_mutex);
sched.h 154 * m: spc_mutex
159 struct kmutex *spc_mutex; /* (: lock on below, runnable LWPs */ member in struct:schedstate_percpu
  /src/sys/kern/
kern_synch.c 557 * On arrival here LWPs on a run queue are locked by spc_mutex which
565 KASSERT(lwp_locked(newl, spc->spc_mutex));
588 * LWPs can appear in the queue due to our hold on spc_mutex, and
590 * the release of spc_mutex becomes globally visible.
620 KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
668 lwp_setlock(l, spc->spc_mutex);
709 * Preemption related tasks. Must be done holding spc_mutex. Clear
711 * sched_resched_cpu() which also holds spc_mutex, and only ever
731 if (l->l_mutex == spc->spc_mutex) {
734 * to the run queue (it is now locked by spc_mutex)
    [all...]
kern_runq.c 153 if (spc->spc_mutex != NULL) {
165 spc->spc_mutex = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
182 * Put an LWP onto a run queue. The LWP must be locked by spc_mutex for
195 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
267 KASSERT(lwp_locked(l, spc->spc_mutex));
317 * will be for concurrency reasons, spc_mutex will be dropped before return.
328 KASSERT(mutex_owned(spc->spc_mutex));
412 * specified, and ideally it will be for concurrency reasons, spc_mutex will
420 KASSERT(lwp_locked(l, ci->ci_schedstate.spc_mutex));
700 lwp_unlock_to(l, curspc->spc_mutex);
    [all...]
sched_4bsd.c 124 KASSERT(l->l_mutex != spc->spc_mutex);
sched_m2.c 306 KASSERT(l->l_mutex != spc->spc_mutex);
kern_sleepq.c 176 lwp_setlock(l, spc->spc_mutex);
  /src/sys/rump/librump/rumpkern/
scheduler.c 174 ci->ci_schedstate.spc_mutex =
381 l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
405 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);

Completed in 15 milliseconds