/*	$NetBSD: kern_synch.c,v 1.177.2.3 2007/02/17 11:13:51 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.177.2.3 2007/02/17 11:13:51 yamt Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
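
/*
 * For reference (simple arithmetic, assuming the usual PRIO_MAX of 20
 * and 32 run queues, i.e. PPQ == 4): ESTCPU_MAX works out to
 * (2 * 20 - 4) << 11 == 73728.  The figure shifts if either constant
 * differs on a given configuration.
 */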

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
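
/*
 * A quick check of the table above (plain arithmetic, not part of the
 * original derivation): for loadav == 1, b == 2 and decay == 2/3.
 * Solving (2/3) ** power == 0.1 gives
 *	power = ln(0.1) / ln(2/3) =~ -2.303 / -0.405 =~ 5.68,
 * which matches the first column.
 */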

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
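
/*
 * Sanity check on the constant above (plain arithmetic, not from the
 * original sources): applying ccpu once per second for 60 seconds leaves
 * exp(-1/20) ** 60 == exp(-3) =~ 0.05 of the original p_pctcpu, i.e.
 * roughly 95% of the value has decayed away after a minute, as the
 * comment promises.
 */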

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & L_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100)?
			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			    100 * (((fixpt_t) p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if ((l->l_flag & L_IDLE) != 0)
					continue;
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
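
/*
 * Illustrative caller pattern only -- the names "obj", "o_busy" and the
 * wait message below are invented for this sketch, not part of any real
 * interface:
 *
 *	simple_lock(&obj->o_interlock);
 *	while (obj->o_busy) {
 *		error = ltsleep(obj, PZERO | PCATCH, "objbusy", 0,
 *		    &obj->o_interlock);
 *		if (error)
 *			break;
 *	}
 *	simple_unlock(&obj->o_interlock);
 *
 * The interlock is dropped once the LWP is on the sleep queue and,
 * because PNORELOCK is not passed here, reacquired before return.
 */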

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
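
/*
 * For example (a sketch, not taken from a real caller): an
 * uninterruptible pause of roughly a tenth of a second, with no kmutex
 * to drop, would look like
 *
 *	(void)kpause("pause", FALSE, hz / 10, NULL);
 *
 * The sleep is keyed on the LWP itself, so no wakeup() is expected;
 * the call simply returns when the timeout expires.
 */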

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * sched_switch_unlock: update 'curlwp' and release old lwp.
 */

void
sched_switch_unlock(struct lwp *old, struct lwp *new)
{

	KASSERT(old == NULL || old == curlwp);

	if (old != NULL) {
		LOCKDEBUG_BARRIER(&old->l_mutex, 1);
	} else {
		LOCKDEBUG_BARRIER(NULL, 1);
	}

	curlwp = new;
	if (old != NULL) {
		lwp_unlock(old);
	}
	spl0();
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & L_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	int retval, oldspl;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		if ((l->l_flag & L_IDLE) == 0) {
			setrunqueue(l);
		}
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	/*
	 * Switch to the new LWP if necessary.
	 * When we run again, we'll return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	/*
	 * Acquire the sched_mutex if necessary.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
	}
#endif

	if (newl == NULL) {
		newl = nextrunqueue();
	}
	if (newl != NULL) {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
	} else {
		newl = l->l_cpu->ci_data.cpu_idlelwp;
		KASSERT(newl != NULL);
	}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_exit(&sched_mutex);
	}
#endif

	newl->l_stat = LSONPROC;
	updatertime(l, spc);
	if (l != newl) {
		struct lwp *prevlwp;

		uvmexp.swtch++;
		pmap_deactivate(l);
		newl->l_cpu = l->l_cpu;
		prevlwp = cpu_switchto(l, newl);
		sched_switch_unlock(prevlwp, l);
		pmap_activate(l);
		retval = 1;
	} else {
		sched_switch_unlock(l, l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());

	(void)splsched();
	splx(oldspl);
	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & L_IDLE) == 0);
	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

boolean_t
sched_curcpu_runnable_p(void)
{

	return sched_whichqs != 0;
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to the
			 * user / kernel boundary, so that they will release
			 * any locks that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return kpri_tab[l->l_usrpri];
}
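
/*
 * Reading the table: indices 0-49 (already kernel priorities) map to
 * themselves, while the user range 50-127 is compressed into 8-49.
 * For example, an LWP with l_usrpri == 50 sleeps at kernel priority 8,
 * and one with l_usrpri == 127 sleeps at kernel priority 49.
 */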

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;

	if (l->l_priority < PUSER)
		return;
	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * On some architectures, it's faster to use a MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
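
/*
 * For example (assuming PPQ == 4, as defined above): an LWP at priority
 * 50 lives on queue 50 / 4 == 12, and bit RQMASK(12) in sched_whichqs
 * is set whenever that queue is non-empty.
 */
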
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently done
 * unlocked.
 */
void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

struct lwp *
nextrunqueue(void)
{
	const struct prochd *rq;
	struct lwp *l;
	int whichq;

	if (sched_whichqs == 0) {
		return NULL;
	}
#ifdef __HAVE_BIGENDIAN_BITOPS
	for (whichq = 0; ; whichq++) {
		if ((sched_whichqs & RQMASK(whichq)) != 0) {
			break;
		}
	}
#else
	whichq = ffs(sched_whichqs) - 1;
#endif
	rq = &sched_qs[whichq];
	l = rq->ph_link;
	return l;
}

#endif /* !defined(__HAVE_MD_RUNQUEUE) */

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	struct prochd *ph;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		first = 1;
		ph = &sched_qs[i];
		for (l = ph->ph_link; l != (void *)ph; l = l->l_forw) {
			if (first) {
				(*pr)("%c%d",
				    (sched_whichqs & RQMASK(i))
				    ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */
#undef RQMASK