    Searched refs:interlock (Results 1 - 25 of 41) sorted by relevancy

  /src/sys/kern/
subr_localcount.c 81 * localcount_drain(lc, cv, interlock)
84 * hold interlock; localcount_drain releases it during cross-calls
85 * and waits on cv. The cv and interlock passed here must be the
98 localcount_drain(struct localcount *lc, kcondvar_t *cv, kmutex_t *interlock)
102 KASSERT(mutex_owned(interlock));
115 mutex_exit(interlock);
116 xc_wait(xc_broadcast(0, &localcount_xc, lc, interlock));
117 mutex_enter(interlock);
129 cv_wait(cv, interlock);
162 kmutex_t *interlock = cookie1; local in function:localcount_xc
    [all...]
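
A minimal sketch of the localcount_drain() contract described above, under the assumptions the comment states: the caller holds the interlock, and the same cv/interlock pair is used for every drain and release of this localcount. The "softc" structure and its members are hypothetical.

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/localcount.h>
#include <sys/mutex.h>

struct softc {
	struct localcount	sc_refcnt;
	kmutex_t		sc_lock;	/* the interlock */
	kcondvar_t		sc_cv;
};

static void
softc_detach(struct softc *sc)
{
	/* localcount_drain() asserts mutex_owned(interlock), drops the
	 * interlock around its cross-calls, and sleeps on sc_cv until
	 * every per-CPU reference has been released. */
	mutex_enter(&sc->sc_lock);
	localcount_drain(&sc->sc_refcnt, &sc->sc_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	localcount_fini(&sc->sc_refcnt);
}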
  /src/lib/libpthread/
pthread_barrier.c 78 pthread_mutex_t *interlock; local in function:pthread_barrier_wait
96 interlock = pthread__hashlock(barrier);
97 pthread_mutex_lock(interlock);
102 interlock);
103 pthread_mutex_unlock(interlock);
111 (void)pthread__park(self, interlock, &barrier->ptb_waiters,
116 pthread_mutex_lock(interlock);
118 pthread_mutex_unlock(interlock);
pthread_rwlock.c 161 pthread_mutex_t *interlock; local in function:pthread__rwlock_rdlock
204 * Grab the interlock. Once we have that, we
207 interlock = pthread__hashlock(ptr);
208 pthread_mutex_lock(interlock);
216 pthread_mutex_unlock(interlock);
225 error = pthread__park(self, interlock, &ptr->ptr_rblocked,
229 pthread__rwlock_early(self, ptr, interlock);
280 pthread_mutex_t *interlock; local in function:pthread__rwlock_wrlock
326 * Grab the interlock. Once we have that, we
329 interlock = pthread__hashlock(ptr);
447 pthread_mutex_t *interlock; local in function:pthread_rwlock_unlock
    [all...]
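
The libpthread pattern above — hash the lock object to a shared interlock mutex, take it, then park on the object's waiter list — relies on the internal pthread__hashlock()/pthread__park() machinery. As a standalone analogue, the sketch below substitutes a plain condvar for the park operation; the barrier logic (generation counter, last arrival releases the rest) mirrors what pthread_barrier_wait() does under its interlock.

#include <pthread.h>

struct barrier {
	pthread_mutex_t	b_interlock;	/* guards all fields below */
	pthread_cond_t	b_cv;		/* stand-in for pthread__park() */
	unsigned	b_init;		/* party count */
	unsigned	b_count;	/* arrivals still expected */
	unsigned	b_gen;		/* round number, detects release */
};

static void
barrier_wait(struct barrier *b)
{
	pthread_mutex_lock(&b->b_interlock);
	unsigned gen = b->b_gen;
	if (--b->b_count == 0) {
		b->b_gen++;		/* open the gate for this round */
		b->b_count = b->b_init;	/* re-arm for the next round */
		pthread_cond_broadcast(&b->b_cv);
	} else {
		while (gen == b->b_gen)	/* guards against spurious wakeups */
			pthread_cond_wait(&b->b_cv, &b->b_interlock);
	}
	pthread_mutex_unlock(&b->b_interlock);
}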
  /src/tests/rump/kernspace/
busypage.c 55 mutex_enter(&testpg->interlock);
58 mutex_exit(&testpg->interlock);
88 mutex_enter(&testpg->interlock);
90 cv_wait(&tcv, &testpg->interlock);
91 mutex_exit(&testpg->interlock);
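
The busypage test is exercising the standard condvar-under-interlock idiom: cv_wait(9) atomically drops the interlock while sleeping and re-takes it before returning. The same idiom in isolation, with illustrative names:

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>

static kmutex_t obj_interlock;
static kcondvar_t obj_cv;
static bool obj_ready;

static void
obj_init(void)
{
	mutex_init(&obj_interlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&obj_cv, "objready");
}

static void
obj_wait(void)
{
	mutex_enter(&obj_interlock);
	while (!obj_ready)
		cv_wait(&obj_cv, &obj_interlock); /* drops/re-takes interlock */
	mutex_exit(&obj_interlock);
}

static void
obj_signal(void)
{
	mutex_enter(&obj_interlock);
	obj_ready = true;
	cv_broadcast(&obj_cv);
	mutex_exit(&obj_interlock);
}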
  /src/lib/librumpuser/
rumpuser_int.h 37 rumpkern_unsched(int *nlocks, void *interlock)
40 rumpuser__hyp.hyp_backend_unschedule(0, nlocks, interlock);
44 rumpkern_sched(int nlocks, void *interlock)
47 rumpuser__hyp.hyp_backend_schedule(nlocks, interlock);
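
These two inlines bracket any hypercall that may block in the host: rumpkern_unsched() gives up the rump virtual CPU, recording the number of kernel locks held in *nlocks, and rumpkern_sched() re-acquires the CPU and restores that count. A sketch of the usual pairing, with no interlock passed through (NULL):

#include <unistd.h>

static ssize_t
blocking_host_read(int fd, void *buf, size_t len)
{
	ssize_t rv;
	int nlocks;

	rumpkern_unsched(&nlocks, NULL);	/* leave the rump kernel */
	rv = read(fd, buf, len);		/* may block in the host */
	rumpkern_sched(nlocks, NULL);		/* come back, restore locks */
	return rv;
}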
  /src/sys/external/bsd/drm2/dist/drm/nouveau/dispnv50/
nouveau_dispnv50_wimmc37b.c 34 wimmc37b_update(struct nv50_wndw *wndw, u32 *interlock)
39 if (interlock[NV50_DISP_INTERLOCK_WNDW] & wndw->interlock.data)
83 wndw->interlock.wimm = wndw->interlock.data;
nouveau_dispnv50_cursc37a.c 31 cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
wndw.h 21 struct nv50_disp_interlock interlock; member in struct:nv50_wndw
47 void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
49 void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
84 void (*update)(struct nv50_wndw *, u32 *interlock);
98 void (*update)(struct nv50_wndw *, u32 *interlock);
nouveau_dispnv50_core507d.c 35 core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
44 evo_data(push, interlock[NV50_DISP_INTERLOCK_BASE] |
45 interlock[NV50_DISP_INTERLOCK_OVLY]);
nouveau_dispnv50_corec37d.c 47 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
57 evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS]);
58 evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
nouveau_dispnv50_wndwc37e.c 208 wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
213 evo_data(push, interlock[NV50_DISP_INTERLOCK_CURS] << 1 |
214 interlock[NV50_DISP_INTERLOCK_CORE]);
215 evo_data(push, interlock[NV50_DISP_INTERLOCK_WNDW]);
217 if (interlock[NV50_DISP_INTERLOCK_WIMM] & wndw->interlock.data)
nouveau_dispnv50_wndw.c 119 nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 *interlock, bool flush,
131 interlock[wndw->interlock.type] |= wndw->interlock.data;
135 nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
138 if (interlock[NV50_DISP_INTERLOCK_CORE]) {
161 interlock[wndw->interlock.type] |= wndw->interlock.data;
162 interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
    [all...]
core.h 22 void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
nouveau_dispnv50_disp.c 1902 nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
1910 NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
1921 core->func->update(core, interlock, true);
1936 nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
1944 if (interlock[wndw->interlock.type] & wndw->interlock.data) {
1946 wndw->func->update(wndw, interlock);
1964 u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {}; local in function:nv50_disp_atomic_commit_tail
1990 interlock[NV50_DISP_INTERLOCK_CORE] |= 1
    [all...]
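
The nouveau code above accumulates interlock state into a small per-commit array: each window ORs its interlock.data bits into the slot for its interlock.type, and the commit path later checks those slots to decide which channels must be flushed and updated together. A much-reduced illustration of that bookkeeping (not the driver code itself):

#include <stdint.h>

enum { ILK_CORE, ILK_CURS, ILK_WNDW, ILK_WIMM, ILK__SIZE };

struct wndw {
	int		ilk_type;	/* which slot this window uses */
	uint32_t	ilk_data;	/* this window's bit(s) in that slot */
};

/* cf. nv50_wndw_flush_set(): record that this window changed. */
static void
flush_set(const struct wndw *w, uint32_t interlock[ILK__SIZE])
{
	interlock[w->ilk_type] |= w->ilk_data;
}

/* cf. nv50_disp_atomic_commit_wndw(): update only touched windows. */
static int
needs_update(const struct wndw *w, const uint32_t interlock[ILK__SIZE])
{
	return (interlock[w->ilk_type] & w->ilk_data) != 0;
}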
  /src/sys/external/bsd/drm2/include/linux/
kref.h 103 spinlock_t *interlock)
113 spin_lock(interlock);
119 spin_unlock(interlock);
137 struct mutex *interlock)
147 mutex_lock(interlock);
153 mutex_unlock(interlock);
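
These are the interlocked reference-drop helpers: the final put takes the interlock before invoking the release callback, so a concurrent lookup holding the same lock can never see a half-destroyed object. A sketch using the Linux-style kref_put_mutex(); the obj type and its fields are hypothetical, and per the usual kref convention the release callback runs with the interlock held and drops it itself:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct kref	refcount;
	struct mutex	lock;		/* the interlock */
};

static void
obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	mutex_unlock(&o->lock);		/* called with the interlock held */
	kfree(o);
}

static void
obj_put(struct obj *o)
{
	/* Drops a reference; on the last one, takes o->lock and calls
	 * obj_release() with it held. */
	kref_put_mutex(&o->refcount, obj_release, &o->lock);
}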
  /src/sys/rump/librump/rumpkern/
klock.c 151 rump_user_unschedule(int nlocks, int *countp, void *interlock)
159 rump_unschedule_cpu_interlock(curlwp, interlock);
163 rump_user_schedule(int nlocks, void *interlock)
166 rump_schedule_cpu_interlock(curlwp, interlock);
scheduler.c 193 * condvar ops using scheduler lock as the rumpuser interlock.
301 rump_schedule_cpu_interlock(struct lwp *l, void *interlock)
324 if (interlock == rcpu->rcpu_mtx)
341 if (interlock != rcpu->rcpu_mtx)
447 rump_unschedule_cpu_interlock(struct lwp *l, void *interlock)
452 rump_unschedule_cpu1(l, interlock);
456 rump_unschedule_cpu1(struct lwp *l, void *interlock)
474 * If the scheduler interlock was requested by the caller, we
478 * grab the interlock.
480 if (interlock == rcpu->rcpu_mtx
    [all...]
vm.c 186 mutex_init(&pg->interlock, MUTEX_DEFAULT, IPL_NONE);
243 mutex_enter(&pg->interlock);
245 mutex_exit(&pg->interlock);
265 mutex_destroy(&pg->interlock);
458 mutex_enter(&pg->interlock);
466 mutex_enter(&pg1->interlock);
467 mutex_enter(&pg2->interlock);
469 mutex_enter(&pg2->interlock);
470 mutex_enter(&pg1->interlock);
478 mutex_exit(&pg->interlock);
    [all...]
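
The pg1/pg2 sequence in vm.c takes two page interlocks in one of two orders depending on a comparison — the classic way to lock a pair of same-type locks without deadlocking. The same idea in isolation, presuming the ordering key is the page address:

#include <sys/mutex.h>

struct page {
	kmutex_t	interlock;
	/* ... */
};

static void
lock_two_pages(struct page *pg1, struct page *pg2)
{
	/* Always lock the lower-addressed page first, so two threads
	 * locking the same pair cannot each hold one interlock and
	 * wait forever for the other. */
	if (pg1 < pg2) {
		mutex_enter(&pg1->interlock);
		mutex_enter(&pg2->interlock);
	} else {
		mutex_enter(&pg2->interlock);
		mutex_enter(&pg1->interlock);
	}
}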
  /src/sys/external/bsd/drm2/include/drm/
drm_wait_netbsd.h 63 DRM_WAITERS_P(drm_waitqueue_t *q, struct mutex *interlock)
65 KASSERT(mutex_is_locked(interlock));
70 DRM_WAKEUP_ONE(drm_waitqueue_t *q, struct mutex *interlock)
72 KASSERT(mutex_is_locked(interlock));
77 DRM_WAKEUP_ALL(drm_waitqueue_t *q, struct mutex *interlock)
79 KASSERT(mutex_is_locked(interlock));
84 DRM_SPIN_WAITERS_P(drm_waitqueue_t *q, spinlock_t *interlock)
86 KASSERT(spin_is_locked(interlock));
91 DRM_SPIN_WAKEUP_ONE(drm_waitqueue_t *q, spinlock_t *interlock)
93 KASSERT(spin_is_locked(interlock));
    [all...]
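
Every DRM wait/wakeup macro here asserts that the caller holds the matching interlock, which is what makes the sleep race-free: the condition is evaluated and the wakeup delivered under the same lock. A sketch of a fence built on this, assuming the DRM_SPIN_WAIT_ON() macro from the same header (which, like cv_timedwait(9), drops and re-takes the interlock while sleeping); the fence type is hypothetical:

#include <drm/drm_wait_netbsd.h>

struct fence_sc {
	spinlock_t	lock;		/* the interlock */
	drm_waitqueue_t	wq;
	bool		done;
};

static int
fence_wait(struct fence_sc *f, int timo)
{
	int ret;

	spin_lock(&f->lock);
	DRM_SPIN_WAIT_ON(ret, &f->wq, &f->lock, timo, f->done);
	spin_unlock(&f->lock);
	return ret;
}

static void
fence_signal(struct fence_sc *f)
{
	spin_lock(&f->lock);
	f->done = true;
	DRM_SPIN_WAKEUP_ONE(&f->wq, &f->lock);
	spin_unlock(&f->lock);
}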
  /src/sys/uvm/
uvm_pdpolicy_clock.c 264 * acquire interlock to stabilize page identity.
268 mutex_enter(&pg->interlock);
270 mutex_exit(&pg->interlock);
293 mutex_exit(&pg->interlock);
300 mutex_exit(&pg->interlock);
306 mutex_exit(&pg->interlock);
314 * with the page interlock held, we can drop s->lock, which
320 * interlock. with the interlock dropped we can then
323 * object -> pdpol -> interlock
    [all...]
uvm_pdpolicy.h 80 KASSERT(mutex_owned(&pg->interlock));
uvm_anon.c 140 mutex_enter(&pg->interlock);
144 mutex_exit(&pg->interlock);
227 mutex_enter(&pg->interlock);
241 mutex_exit(&pg->interlock);
259 mutex_exit(&pg->interlock);
uvm_page.c 1262 * the page's interlock if we are changing the values.
1265 mutex_enter(&pg->interlock);
1274 mutex_exit(&pg->interlock);
1285 mutex_exit(&pg->interlock);
1288 mutex_enter(&pg->interlock);
1290 mutex_exit(&pg->interlock);
1331 KASSERT(mutex_owned(&oldpg->interlock));
1332 KASSERT(mutex_owned(&newpg->interlock));
1380 mutex_enter(&pg->interlock);
1389 mutex_exit(&pg->interlock);
    [all...]
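
Across UVM the page interlock "stabilizes page identity": once pg->interlock is held, the page cannot change owners, so a scanner can safely recheck whom the page belongs to after dropping the larger locks (per the object -> pdpol -> interlock order noted above). A minimal sketch of that recheck; the helper is hypothetical but the field names follow struct vm_page:

#include <sys/param.h>
#include <uvm/uvm.h>

static bool
page_still_owned(struct vm_page *pg, struct uvm_object *uobj)
{
	bool owned;

	mutex_enter(&pg->interlock);
	owned = (pg->uobject == uobj);	/* identity is stable while held */
	mutex_exit(&pg->interlock);
	return owned;
}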
  /src/sys/external/bsd/drm2/linux/
linux_kthread.c 132 kthread_alloc(int (*func)(void *), void *cookie, spinlock_t *interlock,
150 T->kt_interlock = interlock;
169 spinlock_t *interlock, drm_waitqueue_t *wq)
174 T = kthread_alloc(func, cookie, interlock, wq);
190 /* Lock order: interlock, then kthread lock. */
203 /* Release the interlock while we wait for thread to finish. */
238 /* Lock order: interlock, then kthread lock. */
250 * this point, we are done with the interlock, which we must
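
The comments in linux_kthread.c pin down two rules: take the caller's interlock before the private kthread lock, and drop the interlock entirely while waiting for the thread to exit, since the thread takes it on its own way out. A hypothetical reconstruction of a stop path under those rules; only the kt_interlock name comes from the snippet:

#include <sys/condvar.h>
#include <sys/mutex.h>
#include <linux/spinlock.h>

struct kthread_sc {
	spinlock_t	*kt_interlock;	/* caller's lock: taken first */
	kmutex_t	kt_lock;	/* private lock: taken second */
	kcondvar_t	kt_cv;
	bool		kt_stop;
	bool		kt_exited;
};

static void
kthread_stop_wait(struct kthread_sc *T)
{
	/* Lock order: interlock, then kthread lock. */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);
	T->kt_stop = true;
	cv_broadcast(&T->kt_cv);
	mutex_exit(&T->kt_lock);
	spin_unlock(T->kt_interlock);

	/* Release the interlock while we wait for the thread to finish. */
	mutex_enter(&T->kt_lock);
	while (!T->kt_exited)
		cv_wait(&T->kt_cv, &T->kt_lock);
	mutex_exit(&T->kt_lock);
}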
  /src/sys/arch/atari/dev/
dma.c 106 int rcaller, kmutex_t *interlock)
135 interlock);

Completed in 41 milliseconds
