    Searched refs:nlocks (Results 1 - 22 of 22) sorted by relevancy

  /src/sys/rump/librump/rumpkern/
klock.c 67 rump_kernel_bigwrap(int *nlocks)
74 *nlocks = giantcnt;
79 rump_kernel_bigunwrap(int nlocks)
84 giantcnt = nlocks;
88 _kernel_lock(int nlocks)
92 while (nlocks) {
94 giantcnt += nlocks;
95 nlocks = 0;
108 nlocks--;
114 _kernel_unlock(int nlocks, int *countp)
    [all...]
sleepq.c 62 int nlocks; local
68 if ((nlocks = l->l_blcnt) != 0) {
71 return nlocks;
87 sleepq_block(int timo, bool catch, syncobj_t *syncobj __unused, int nlocks)
106 if (nlocks)
107 KERNEL_LOCK(nlocks, curlwp);
ltsleep.c 123 int rv, nlocks; local
132 rump_kernel_bigwrap(&nlocks);
135 rump_kernel_bigunwrap(nlocks);
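
The ltsleep.c hit shows how the two klock.c helpers above are meant to be paired. A minimal sketch of that pairing, assuming only the signatures visible in the snippets (the blocking work in the middle is a placeholder):

    /* Save the biglock recursion count before blocking, restore it after;
     * this is the bracket ltsleep.c puts around its sleep. */
    int nlocks;

    rump_kernel_bigwrap(&nlocks);
    /* ... block, e.g. wait on a condition variable ... */
    rump_kernel_bigunwrap(nlocks);
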
scheduler.c 514 int nlocks; local
516 KERNEL_UNLOCK_ALL(l, &nlocks);
519 KERNEL_LOCK(nlocks, l);
lwproc.c 451 int nlocks; local
470 KERNEL_UNLOCK_ALL(NULL, &nlocks);
479 KERNEL_LOCK(nlocks, NULL);
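
Both scheduler.c and lwproc.c use the same drop-and-restore idiom: release every kernel_lock hold the LWP has, remember how many there were, and take exactly that many back afterwards. A sketch using only the macros visible above (curlwp stands in for whichever LWP the caller is working with):

    int nlocks;

    KERNEL_UNLOCK_ALL(curlwp, &nlocks);    /* drop all holds, record the count */
    /* ... work that must run without the kernel lock ... */
    KERNEL_LOCK(nlocks, curlwp);           /* reacquire the recorded number */
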
  /src/lib/librumpuser/
rumpuser_int.h 37 rumpkern_unsched(int *nlocks, void *interlock)
40 rumpuser__hyp.hyp_backend_unschedule(0, nlocks, interlock);
44 rumpkern_sched(int nlocks, void *interlock)
47 rumpuser__hyp.hyp_backend_schedule(nlocks, interlock);
52 int nlocks; \
53 rumpkern_unsched(&nlocks, NULL); \
55 rumpkern_sched(nlocks, NULL); \
72 int nlocks; \
73 rumpkern_unsched(&nlocks, NULL); \
75 rumpkern_sched(nlocks, NULL);
    [all...]
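
The two macro bodies above repeat one idiom: give up the rump virtual CPU before a host call that might block, then re-schedule with the same nlocks count afterwards. A sketch of that wrapper with an invented name (the header's real macro names are not visible in the snippets):

    /* Hypothetical wrapper; "op" is whatever host call may block. */
    #define DO_UNSCHEDULED(op)                          \
    do {                                                \
        int nlocks;                                     \
        rumpkern_unsched(&nlocks, NULL);                \
        op;                                             \
        rumpkern_sched(nlocks, NULL);                   \
    } while (/*CONSTCOND*/ 0)

A caller would wrap a blocking host call as, say, DO_UNSCHEDULED(rv = host_write(fd, buf, len)), so the rump CPU stays available to other threads while the host call sleeps (rv and host_write are illustrative names).
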
rumpuser_component.c 50 int nlocks; local
52 rumpkern_unsched(&nlocks, NULL);
53 return (void *)(intptr_t)nlocks;
59 int nlocks = (int)(intptr_t)cookie; local
61 rumpkern_sched(nlocks, NULL);
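
rumpuser_component.c hides the count inside a void * cookie so external components can bracket their own blocking sections without knowing about nlocks. Reassembled from the fragments above (the function names here are illustrative, not the component API's):

    #include <stdint.h>

    /* Unschedule: the hold count leaves as an opaque cookie. */
    void *
    example_unschedule(void)
    {
        int nlocks;

        rumpkern_unsched(&nlocks, NULL);
        return (void *)(intptr_t)nlocks;
    }

    /* Schedule: the cookie comes back and becomes a count again. */
    void
    example_schedule(void *cookie)
    {
        int nlocks = (int)(intptr_t)cookie;

        rumpkern_sched(nlocks, NULL);
    }
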
rumpuser_file.c 226 int nlocks; local
228 rumpkern_unsched(&nlocks, NULL);
231 rumpkern_sched(nlocks, NULL);
256 int nlocks;
258 rumpkern_unsched(&nlocks, NULL);
264 rumpkern_sched(nlocks, NULL);
293 int nlocks;
295 rumpkern_unsched(&nlocks, NULL);
301 rumpkern_sched(nlocks, NULL);
rumpfiber.c 489 int nlocks; local
491 rumpkern_unsched(&nlocks, NULL);
502 rumpkern_sched(nlocks, NULL);
706 int nlocks; local
709 rumpkern_unsched(&nlocks, NULL);
712 rumpkern_sched(nlocks, NULL);
793 int nlocks; local
805 rumpkern_unsched(&nlocks, NULL);
808 rumpkern_sched(nlocks, NULL);
929 cv_unsched(struct rumpuser_mtx *mtx, int *nlocks)
954 int nlocks; local
978 int nlocks; local
    [all...]
rumpuser_pth.c 519 cv_unschedule(struct rumpuser_mtx *mtx, int *nlocks)
522 rumpkern_unsched(nlocks, mtx);
527 cv_reschedule(struct rumpuser_mtx *mtx, int nlocks)
548 rumpkern_sched(nlocks, mtx);
552 rumpkern_sched(nlocks, mtx);
559 int nlocks; local
562 cv_unschedule(mtx, &nlocks);
564 cv_reschedule(mtx, nlocks);
584 int rv, nlocks; local
596 cv_unschedule(mtx, &nlocks);
    [all...]
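
The pthread backend adds one twist to the unsched/sched bracket: the mutex is passed along as an interlock, so the hypercall can release it together with the rump CPU. The shape suggested by the cv_unschedule()/cv_reschedule() hits above (the host condition-variable wait itself is elided):

    int nlocks;

    cv_unschedule(mtx, &nlocks);    /* give up the rump CPU, mtx as interlock */
    /* ... wait on the host condition variable ... */
    cv_reschedule(mtx, nlocks);     /* take the rump CPU back with the saved count */
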
rumpuser.c 118 int nlocks; local
121 rumpkern_unsched(&nlocks, NULL);
169 rumpkern_sched(nlocks, NULL);
rumpuser_bio.c 140 int nlocks; local
142 rumpkern_unsched(&nlocks, NULL);
188 rumpkern_sched(nlocks, NULL);
rumpuser_sp.c 787 int rv, nlocks; local
789 rumpkern_unsched(&nlocks, NULL);
799 rumpkern_sched(nlocks, NULL);
827 int nlocks, rv; local
829 rumpkern_unsched(&nlocks, NULL);
831 rumpkern_sched(nlocks, NULL);
861 int nlocks, rv; local
863 rumpkern_unsched(&nlocks, NULL);
881 rumpkern_sched(nlocks, NULL);
889 int rv, nlocks; local
    [all...]
  /src/sys/kern/
kern_lock.c 65 "unsigned"/*nlocks*/);
67 "unsigned"/*nlocks*/);
257 * Acquire 'nlocks' holds on the kernel lock.
265 _kernel_lock(int nlocks)
275 _KERNEL_LOCK_ASSERT(nlocks > 0);
281 SDT_PROBE1(sdt, kernel, lock, entry, nlocks);
282 ci->ci_biglock_count += nlocks;
283 l->l_blcnt += nlocks;
294 SDT_PROBE1(sdt, kernel, lock, entry, nlocks);
295 ci->ci_biglock_count = nlocks;
    [all...]
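
The kern_lock.c snippets show the two accounting cases in the real kernel: when the big lock is already held on the current CPU the counters are simply bumped by nlocks, and on a fresh acquisition the per-CPU count is set to nlocks. A heavily simplified sketch of that accounting only (the spin loop, SDT probes and LOCKDEBUG bookkeeping are omitted; this is not the real _kernel_lock()):

    static void
    kernel_lock_accounting_sketch(int nlocks, struct cpu_info *ci, struct lwp *l)
    {
        KASSERT(nlocks > 0);

        if (ci->ci_biglock_count != 0) {
            /* Recursive acquisition: deepen the existing hold. */
            ci->ci_biglock_count += nlocks;
            l->l_blcnt += nlocks;
            return;
        }

        /* ... spin until the lock is free, then take the first holds ... */
        ci->ci_biglock_count = nlocks;
        l->l_blcnt += nlocks;
    }
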
kern_condvar.c 130 int nlocks; local
138 nlocks = sleepq_enter(sq, l, mp);
142 return nlocks;
177 int nlocks; local
181 nlocks = cv_enter(cv, mtx, l, false);
182 (void)sleepq_block(0, false, &cv_syncobj, nlocks);
198 int error, nlocks; local
202 nlocks = cv_enter(cv, mtx, l, true);
203 error = sleepq_block(0, true, &cv_syncobj, nlocks);
221 int error, nlocks; local
246 int error, nlocks; local
    [all...]
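
None of this nlocks plumbing is visible to a condvar consumer; it is what lets cv_wait() drop any kernel_lock holds across the sleep and take them back before returning. A standard condvar(9) caller for reference:

    static kmutex_t example_lock;
    static kcondvar_t example_cv;
    static bool example_ready;

    static void
    example_wait(void)
    {
        mutex_enter(&example_lock);
        while (!example_ready)
            cv_wait(&example_cv, &example_lock);  /* sleeps; kernel_lock
                                                     holds restored on wakeup */
        mutex_exit(&example_lock);
    }
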
kern_synch.c 191 int nlocks; local
204 nlocks = sleepq_enter(sq, l, mp);
206 return sleepq_block(timo, catch_p, &sleep_syncobj, nlocks);
217 int error, nlocks; local
229 nlocks = sleepq_enter(sq, l, mp);
232 error = sleepq_block(timo, catch_p, &sleep_syncobj, nlocks);
247 int error, nlocks; local
257 nlocks = sleepq_enter(NULL, l, NULL);
259 error = sleepq_block(timo, intr, &kpause_syncobj, nlocks);
292 int nlocks; local
317 int nlocks; local
    [all...]
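
kern_synch.c shows the same handoff inside the tsleep()-family functions and kpause(): sleepq_enter() reports how many kernel_lock holds were released and sleepq_block() re-takes them. A kpause(9) usage sketch for reference (wmesg and timeout are illustrative values):

    /* Sleep roughly 10 ms without holding a mutex; any kernel_lock holds
     * are dropped for the duration and restored before kpause() returns. */
    int error;

    error = kpause("example", false, mstohz(10), NULL);
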
kern_sleepq.c 233 int nlocks; local
245 if (__predict_false((nlocks = l->l_blcnt) != 0)) {
248 return nlocks;
345 sleepq_block(int timo, bool catch_p, syncobj_t *syncobj, int nlocks)
446 if (__predict_false(nlocks != 0)) {
447 KERNEL_LOCK(nlocks, NULL);
kern_turnstile.c 378 int nlocks; local
421 nlocks = sleepq_enter(sq, l, lock);
433 sleepq_block(0, false, sobj, nlocks);
kern_timeout.c 589 int nlocks; local
623 nlocks = sleepq_enter(&cc->cc_sleepq, l, cc->cc_lock);
626 sleepq_block(0, false, &callout_syncobj, nlocks);
  /src/tests/fs/vfs/
t_vnops.c 955 unsigned int nlocks; local
960 nlocks = fcntl_getlocks(fd[i], 0, sz,
964 ATF_REQUIRE(nlocks < __arraycount(result));
965 result[nlocks] = lock[i];
966 result[nlocks].l_pid = pid[i];
967 nlocks++;
970 ATF_CHECK_EQ(nlocks, __arraycount(expect));
972 qsort(result, nlocks, sizeof(result[0]), &flock_compare);
974 for (j = 0; j < nlocks; j++) {
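
The vnops test counts the advisory locks reported for a range and checks them against an expected table; fcntl_getlocks() is a helper local to the test whose full argument list is truncated above. For orientation only, a plain-POSIX sketch of walking conflicting locks with F_GETLK (this is not the test's helper):

    #include <fcntl.h>
    #include <unistd.h>

    /* Count locks held by other processes over [start, start + len). */
    static unsigned int
    count_advisory_locks(int fd, off_t start, off_t len)
    {
        struct flock fl;
        off_t pos = start;
        unsigned int n = 0;

        for (;;) {
            fl.l_type = F_WRLCK;            /* probe for any conflicting lock */
            fl.l_whence = SEEK_SET;
            fl.l_start = pos;
            fl.l_len = start + len - pos;
            if (fcntl(fd, F_GETLK, &fl) == -1 || fl.l_type == F_UNLCK)
                break;
            n++;
            if (fl.l_len == 0)              /* reported lock runs to EOF */
                break;
            pos = fl.l_start + fl.l_len;    /* resume past the reported lock */
            if (pos >= start + len)
                break;
        }
        return n;
    }
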
  /src/tests/kernel/
t_lockf.c 56 #define nlocks 500 /* number of locks per thread */ macro
84 for (i = 0; i < nlocks; i++) {
  /src/sys/uvm/pmap/
pmap.c 2267 const size_t nlocks = PAGE_SIZE / cache_line_size; local
2268 KASSERT((nlocks & (nlocks - 1)) == 0);
2272 for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
2277 pli->pli_lock_mask = nlocks - 1;
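
The pmap hit is unrelated to the kernel lock: nlocks there sizes an array of per-cache-line locks, and the KASSERT insists on a power of two so an index can be reduced with pli_lock_mask instead of a modulo. The trick in isolation (the shift and argument names are illustrative):

    #include <stdint.h>

    /* With nlocks a power of two, lock_mask == nlocks - 1 and
     * "x & lock_mask" is equivalent to "x % nlocks". */
    static inline unsigned int
    pick_lock_index(uintptr_t pa, unsigned int lock_mask)
    {
        return (unsigned int)(pa >> 6) & lock_mask;  /* 6: assumed cache-line shift */
    }
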
