OpenGrok
Searched refs:l_mutex in project src (results 1 - 15 of 15), sorted by relevance
/src/sys/rump/librump/rumpkern/sleepq.c
   91  kmutex_t *mp = l->l_mutex;
   94  l->l_mutex = mp; /* keep sleepq lock until woken up */
  145  mutex_spin_exit(l->l_mutex);
  184  KASSERT(mutex_owned(l->l_mutex));
  186  old = l->l_mutex;
  187  atomic_store_release(&l->l_mutex, new);
  194  kmutex_t *old = atomic_load_consume(&l->l_mutex);
  197  while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
  199  old = atomic_load_consume(&l->l_mutex);
  208  mutex_spin_exit(l->l_mutex);
  [all ...]
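The sleepq.c hits at lines 194-208 show how an LWP is locked against a lock pointer that can change underneath the caller: read l_mutex with consume ordering, take that spin mutex, then re-check that l_mutex still points at it, retrying if it was switched in the meantime. A minimal sketch of that loop, using the kernel primitives seen in the hits but with a hypothetical function name and none of the real lwp_lock() details:

    /* Sketch only; not the kernel's lwp_lock() implementation. */
    #include <sys/param.h>
    #include <sys/lwp.h>
    #include <sys/mutex.h>
    #include <sys/atomic.h>

    static void
    sketch_lwp_lock(struct lwp *l)
    {
        kmutex_t *old = atomic_load_consume(&l->l_mutex);

        for (;;) {
            mutex_spin_enter(old);
            /*
             * l_mutex may have been switched to another lock (for
             * example a sleep queue lock) while we were spinning;
             * if it still matches, the LWP is now locked.
             */
            if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
                break;
            /* Stale lock: drop it and retry against the current pointer. */
            mutex_spin_exit(old);
            old = atomic_load_consume(&l->l_mutex);
        }
    }

The consume load pairs with the atomic_store_release() seen at line 187, so a locker that observes the new pointer also observes the state that the new lock protects.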
/src/sys/rump/librump/rumpkern/scheduler.c
  381  l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;
  405  KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);
  407  l->l_mutex = &unruntime_lock;
  428  lwp0.l_mutex = &unruntime_lock;
/src/sys/rump/librump/rumpkern/lwproc.c
   70  KASSERT(mutex_owned(l->l_mutex));
  335  l->l_mutex = &unruntime_lock;
  474  newlwp->l_mutex = l->l_mutex;
  491  l->l_mutex = &unruntime_lock;
/src/sys/kern/kern_turnstile.c
  220  LOCKDEBUG_BARRIER(l->l_mutex, 1);
  249  * Acquire owner->l_mutex if we don't have it yet.
  250  * Because we already have another LWP lock (l->l_mutex) held,
  253  dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);
  296  LOCKDEBUG_BARRIER(owner->l_mutex, 1);
  299  LOCKDEBUG_BARRIER(l->l_mutex, 1);
  300  if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) {
  304  LOCKDEBUG_BARRIER(cur->l_mutex, 1)
  [all ...]
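The comments at lines 249-250 and the test at line 253 capture why owner->l_mutex is taken only conditionally: the caller already holds l->l_mutex, and two LWPs can share the same scheduler lock, so blindly acquiring the owner's lock could mean taking a spin mutex it already holds. A rough sketch of that idiom, assuming the same kernel headers as the first sketch, with a hypothetical function name and the re-check/back-out handling of the real code omitted:

    static void
    sketch_adjust_owner(struct lwp *l, struct lwp *owner)
    {
        kmutex_t *mp = NULL;
        bool dolock;

        KASSERT(mutex_owned(l->l_mutex));

        /* Only lock the owner if it is covered by a different mutex. */
        dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);
        if (dolock) {
            mp = atomic_load_consume(&owner->l_mutex);
            mutex_spin_enter(mp);
            /*
             * The real code re-checks owner->l_mutex after acquiring and
             * backs out if it changed; that retry logic is omitted here.
             */
        }

        /* ... adjust the owner's lent priority here ... */

        if (dolock)
            mutex_spin_exit(mp);
    }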
/src/sys/kern/kern_lwp.c
  159  * general spin lock pointed to by lwp::l_mutex. The locks covering
  191  * l_mutex references the sleep queue lock. If the LWP was
  400  l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
  837  KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);
 1045  lock = prev->l_mutex;
 1545  kmutex_t *cur = l->l_mutex;
 1556  kmutex_t *oldmtx = l->l_mutex;
 1560  atomic_store_release(&l->l_mutex, mtx);
 1575  old = l->l_mutex;
 1576  atomic_store_release(&l->l_mutex, mtx)
  [all ...]
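The kern_lwp.c hits at lines 1545-1576 come from the routines that move an LWP from one lock to another: with the current l_mutex held, the new mutex pointer is published with release ordering, pairing with the consume loads in the locking loop sketched earlier, and only afterwards is the old mutex dropped. A condensed sketch with hypothetical names modelled on those hits, again assuming the same kernel headers:

    static kmutex_t *
    sketch_lwp_setlock(struct lwp *l, kmutex_t *mtx)
    {
        kmutex_t *oldmtx = l->l_mutex;

        KASSERT(mutex_owned(oldmtx));
        /* Publish the new lock; pairs with atomic_load_consume() in lockers. */
        atomic_store_release(&l->l_mutex, mtx);
        return oldmtx;
    }

    static void
    sketch_lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
    {
        kmutex_t *old = sketch_lwp_setlock(l, mtx);

        /* The LWP is now covered by mtx; release the lock we held. */
        mutex_spin_exit(old);
    }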
/src/sys/kern/kern_idle.c
   94  KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
/src/sys/kern/kern_condvar.c
  493  KASSERT(l->l_mutex == mp);
  540  KASSERT(l->l_mutex == mp);
/src/sys/kern/kern_sleepq.c
  464  KASSERT(l->l_mutex == mp);
  487  kmutex_t *mp = l->l_mutex;
/src/sys/kern/kern_synch.c
  731  if (l->l_mutex == spc->spc_mutex) {
  746  LOCKDEBUG_BARRIER(l->l_mutex, 1);
  834  lock = prevlwp->l_mutex;
  895  KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
/src/sys/kern/sched_4bsd.c
  124  KASSERT(l->l_mutex != spc->spc_mutex);
/src/sys/kern/sched_m2.c
  102  /* without attaching the primary CPU l_mutex does not get initialized */
  306  KASSERT(l->l_mutex != spc->spc_mutex);
/src/sys/kern/sys_lwp.c
  491  mp = t->l_mutex;
/src/sys/kern/sys_select.c
  915  if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
/src/sys/kern/kern_runq.c
  151  lwp0.l_mutex = spc->spc_lwplock;
/src/sys/sys/lwp.h
   72  * l: *l_mutex
   74  * s: spc_mutex, which may or may not be referenced by l_mutex
   91  kmutex_t * volatile l_mutex; /* l: ptr to mutex on sched state */
  (member in struct lwp)
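Read together, the lwp.h hits give the shape of the scheme: the structure carries a volatile pointer to whichever spin mutex currently covers its scheduling state, and fields tagged "l:" in the header's locking key are protected by that mutex, while "s:" fields belong to spc_mutex, which l_mutex may or may not reference at a given moment. Reduced to the relevant member, roughly:

    struct lwp {
        /* ... many other members ... */
        kmutex_t * volatile l_mutex;    /* l: ptr to mutex on sched state */
        /*
         * Fields annotated "l:" are covered by *l_mutex; "s:" fields by
         * spc_mutex, which l_mutex may or may not currently point at.
         */
    };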