/src/sys/kern/

kern_rwlock.c
    285   uintptr_t owner, incr, need_wait, set_wait, curthread, next;   local in function:rw_vector_enter
    296   curthread = (uintptr_t)l;
    299   RW_ASSERT(rw, curthread != 0);
    323   incr = curthread | RW_WRITE_LOCKED;
    353   if (__predict_false(RW_OWNER(rw) == curthread)) {
    410   if (op == RW_READER || (rw->rw_owner & RW_THREAD) == curthread)
    426   RW_ASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
    439   uintptr_t curthread, owner, decr, newown, next;   local in function:rw_vector_exit
    445   curthread = (uintptr_t)l;
    446   RW_ASSERT(rw, curthread != 0);
    552   uintptr_t curthread, owner, incr, need_wait, next;   local in function:rw_vector_tryenter
    694   uintptr_t owner, curthread, newown, next;   local in function:rw_tryupgrade
    [all...]

kern_mutex.c
    231   MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
    235   uintptr_t newown = curthread;
    452   uintptr_t owner, curthread;   local in function:mutex_vector_enter
    515   curthread = (uintptr_t)curlwp;
    518   MUTEX_ASSERT(mtx, curthread != 0);
    545   if (MUTEX_ACQUIRE(mtx, curthread))
    550   if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
    706   MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
    719   uintptr_t curthread;   local in function:mutex_vector_exit
    746   curthread = (uintptr_t)curlwp;
    875   uintptr_t curthread;   local in function:mutex_tryenter
    [all...]

kern_runq.c
    118   struct lwp *curthread;   variable in typeref:struct:lwp *
    1065  curthread = l;

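The kern_mutex.c and kern_rwlock.c hits above all turn on one idiom: the current LWP pointer is cast to uintptr_t (the local curthread), optionally OR'd with low flag bits such as RW_WRITE_LOCKED, and compare-and-swapped into the lock's owner word; the same value is later checked against that word in assertions and in the recursion test. Below is a minimal user-space sketch of that owner-word pattern using C11 atomics; it is not NetBSD's code, and owner_lock, owner_lock_tryenter and OWNER_WRITE_LOCKED are illustrative names only.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define OWNER_WRITE_LOCKED  0x1UL     /* illustrative low-bit state flag */
#define OWNER_THREAD        (~0x1UL)  /* mask recovering the thread identity */

struct owner_lock {
    _Atomic uintptr_t owner;          /* 0 means unowned */
};

/* Try to take the lock for writing; curthread is the caller's identity. */
static bool
owner_lock_tryenter(struct owner_lock *lk, uintptr_t curthread)
{
    uintptr_t expected = 0;

    /* CAS 0 -> (curthread | flag): the shape of MUTEX_ACQUIRE() and of
     * "incr = curthread | RW_WRITE_LOCKED" in the hits above. */
    return atomic_compare_exchange_strong(&lk->owner, &expected,
        curthread | OWNER_WRITE_LOCKED);
}

static void
owner_lock_exit(struct owner_lock *lk, uintptr_t curthread)
{
    /* Only the owner may release; compare the masked owner word against
     * the caller, as the RW_OWNER()/MUTEX_OWNER() assertions above do. */
    assert((atomic_load(&lk->owner) & OWNER_THREAD) == curthread);
    atomic_store(&lk->owner, 0);
}

A caller would pass its own thread pointer cast to uintptr_t, e.g. owner_lock_tryenter(&lk, (uintptr_t)self); the point is only that ownership and a little state share one atomically updated word.
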
/src/sys/arch/hppa/include/

mutex.h
    162   MUTEX_ACQUIRE(struct kmutex *mtx, uintptr_t curthread)
    166   mtx->mtx_owner = curthread;

/src/sys/fs/nfs/client/

nfs_clkrpc.c
    153   struct thread *td = curthread;

nfs_clnode.c
    100   struct thread *td = curthread; /* XXX */

nfs_clbio.c
    92    td = curthread; /* XXX */
    93    cred = curthread->td_ucred; /* XXX */
    238   td = curthread; /* XXX */
    243   cred = crhold(curthread->td_ucred); /* XXX */
    860   KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,

nfs_clvfsops.c
    292   td = curthread;
    399   struct thread *td = curthread;
    897   td = curthread;
    1652  td = curthread;
    1729  ncl_fsinfo(nmp, vp, curthread->td_ucred, curthread);
    1750  td = curthread;

nfs_clvnops.c
    812   struct thread *td = curthread; /* XXX */
    887   struct thread *td = curthread; /* XXX */
    2604  ncl_asyncio(VFSTONFS(ap->a_vp->v_mount), bp, NOCRED, curthread))
    2605  (void) ncl_doio(ap->a_vp, bp, cr, curthread, 1);
    2990  struct thread *td = curthread; /* XXX */
    3187  curthread->td_ru.ru_oublock++;
    3340  return (ncl_writebp(bp, 1, curthread));
    3392  struct thread *td = curthread;

nfs_clport.c
    1064  fibnum = curthread->td_proc->p_fibnum;

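In the FreeBSD-derived NFS client files above, curthread is the kernel's handle on the currently executing struct thread; code paths that are not handed an explicit thread fall back to it, and take the credential from its td_ucred field, which is why "td = curthread; /* XXX */" recurs at so many entry points. The following self-contained sketch shows that fallback idiom only; struct thread, struct ucred, current_thread() and nfs_op() here are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

struct ucred  { unsigned long cr_uid; };      /* stand-in, not the kernel's */
struct thread { struct ucred *td_ucred; };    /* stand-in, not the kernel's */

/* Stand-in for curthread: in the kernel it is a macro reading the per-CPU
 * pointer to the running thread, not a function call. */
static struct ucred  cred0   = { 0 };
static struct thread thread0 = { &cred0 };

static struct thread *
current_thread(void)
{
    return (&thread0);
}

/* An entry point that is not passed a thread explicitly falls back to the
 * current one, mirroring "td = curthread;" and "cred = td->td_ucred;". */
static int
nfs_op(struct thread *td)
{
    struct ucred *cred;

    if (td == NULL)
        td = current_thread();
    cred = td->td_ucred;

    /* ... td and cred would be threaded down into the RPC layer ... */
    printf("issuing request as uid %lu\n", cred->cr_uid);
    return (0);
}

int
main(void)
{
    return (nfs_op(NULL));
}
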
/src/sys/fs/nfs/common/

nfs_lock.c
    164   error = nfslockdans(curthread, &la);
    248   td = curthread;

nfs_commonkrpc.c
    177   struct thread *td = curthread;
    1083  td = curthread; /* XXX */
    1109  td = curthread; /* XXX */
    1127  td = curthread; /* XXX */

nfs_diskless.c
    229   CURVNET_SET(TD_TO_VNET(curthread));

nfs_commonport.c
    379   struct thread *td = curthread;

nfs_commonsubs.c
    2716  NULL, curthread);

/src/sys/fs/nfs/nlm/

nlm_advlock.c
    177   struct thread *td = curthread;
    200   struct thread *td = curthread;
    456   struct thread *td = curthread;
    493   struct thread *td = curthread;

nlm_prot_impl.c
    1533  struct thread *td = curthread;
    1596  nlm_auth = authunix_create(curthread->td_ucred);
    1808  error = VOP_ACCESS(vs->vs_vp, accmode, cred, curthread);
    1815  error = VOP_ACCESS(vs->vs_vp, VWRITE, cred, curthread);
    1821  VOP_UNLOCK(vs->vs_vp, 0, curthread);

/src/sys/arch/ia64/ia64/

interrupt.c
    169   sched_preempt(curthread);

/src/sys/fs/nfs/server/

nfs_nfsdkrpc.c
    321   struct thread *td = curthread;

nfs_nfsdport.c
    3398  nfsrv_throwawayallstate(curthread);

/src/sys/net80211/

ieee80211_netbsd.c
    712   struct thread *td = curthread;

ieee80211_ioctl.c
    2523  error = suser(curthread);
    2532  error = suser(curthread);

/src/sys/external/bsd/drm/dist/bsd-core/

drmP.h
    213   #define DRM_CURPROC curthread
    229   #define DRM_CURRENTPID curthread->td_proc->p_pid

/src/sys/external/bsd/ipf/netinet/

ip_nat.c
    996   if (securelevel_ge(curthread->td_ucred, 3) && (mode & FWRITE))