/*	$NetBSD: sched_4bsd.c,v 1.1.2.10 2007/03/10 13:40:49 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.2.10 2007/03/10 13:40:49 rmind Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We have 32 run queues in descending priority order, 0..31.  We maintain
 * a bitmask of non-empty queues in order to speed up finding the first
 * runnable process.  The bitmask is maintained only by machine-dependent
 * code, allowing the most efficient instructions to be used to find the
 * first non-empty queue.
 */

#define	RUNQUE_NQS	32			/* number of runqueues */
#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
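
/*
 * Illustration (not part of the original comment): with PPQ == 128/32 == 4,
 * an LWP's queue index is its priority divided by PPQ, so priorities 48..51
 * all map to run queue 12.  A priority change that stays within one PPQ
 * band therefore never moves an LWP between queues.
 */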

struct prochd {
	struct lwp *ph_link;
	struct lwp *ph_rlink;
};

struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void schedcpu(void *);
void updatepri(struct lwp *);
void resetpriority(struct lwp *);
void resetprocpriority(struct proc *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

int rrticks;	/* number of hardclock ticks per sched_tick() */

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu(), 0);
}

#define	NICE_WEIGHT	2	/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
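
/*
 * Worked example of the limit above (assuming the usual PRIO_MAX of 20):
 * ESTCPU_MAX == (2 * 20 - 4) << ESTCPU_SHIFT == 36 << ESTCPU_SHIFT, so
 * ESTCPULIM() saturates p_estcpu where its contribution to the priority
 * calculation is 36, i.e. a span of 36 / PPQ == 9 run queues.
 */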

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *	therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *	therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	    solving for factor,
 *	    ln(factor) =~ (-2.30/5*loadav), or
 *	    factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *		exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	    solving for power,
 *	    power*ln(b/(b+1)) =~ -2.30, or
 *	    power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

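/*
 * decay_cpu:
 *
 *	Apply one interval of digital decay, estcpu * loadfac /
 *	(loadfac + FSCALE), in FSCALE fixed point.  As an illustrative
 *	check of the numbers: with a load average of 1.0, loadfac ==
 *	2 * FSCALE, so the per-interval multiplier is 2/3, and
 *	(2/3) ** 5.68 =~ 0.1, matching the power table above.
 */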
static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
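
/*
 * Example of the early-out above (illustrative): with a load average of
 * 1.0, 7 * loadfac == 14 << FSHIFT, so an LWP that has slept for 14 or
 * more seconds has its estimate zeroed without running the decay loop.
 */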

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
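
/*
 * Sanity check on the constant (illustrative): schedcpu() multiplies
 * p_pctcpu by ccpu == exp(-1/20) once per second, so after 60 seconds an
 * idle process retains exp(-60/20) =~ 0.0498 of its p_pctcpu -- the
 * "95% in 60 seconds" decay promised above.
 */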

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100) ?
			    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			    100 * (((fixpt_t)p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				if ((l->l_flag & LW_IDLE) != 0)
					continue;
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{
	int i;

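	/*
	 * Each queue head doubles as its own list sentinel: an empty
	 * queue has ph_link and ph_rlink pointing back at the head.
	 */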
	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

void
sched_setup(void)
{
	rrticks = hz / 10;

	schedcpu(NULL);
}

void
sched_setrunnable(struct lwp *l)
{
	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{

	return sched_whichqs != 0;
}

void
sched_nice(struct proc *chgp, int n)
{
	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}
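
/*
 * A worked instance of the formula above (assuming the traditional PUSER
 * of 50): an LWP with p_estcpu == 16 << ESTCPU_SHIFT and p_nice ==
 * NZERO + 4 gets newpriority = 50 + 16 + 2 * 4 == 74, which lands in
 * run queue 74 / PPQ == 18.
 */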

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in
 * resetpriority() above) will compute a different value each time p_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a
 * PPQ boundary the actual queue will not change.  The CPU usage estimator
 * ramps up quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when the
 * system is busy.  The basic principle is that the system will 90% forget
 * that the process used a lot of CPU time in 5 * loadav seconds.  This
 * causes the system to favor processes which haven't run much recently,
 * and to round-robin among other processes.
 */
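
/*
 * To make the PPQ remark above concrete: each sched_clock() call below
 * adds 1 << ESTCPU_SHIFT to p_estcpu, raising the computed user priority
 * by one, so roughly PPQ (4) ticks of accumulated CPU time move an LWP
 * down one run queue.
 */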

void
sched_clock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Charge back parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}
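
/*
 * Illustration of the chargeback above: if a child inherited
 * 8 << ESTCPU_SHIFT at fork, that figure has since decayed to
 * 2 << ESTCPU_SHIFT, and the child exits with p_estcpu == 5 << ESTCPU_SHIFT,
 * the parent is charged the 3 << ESTCPU_SHIFT the child accumulated on
 * its own.
 */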

/*
 * On some architectures, it's faster to use a MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n)	(0x80000000 >> (n))
#else
#define	RQMASK(n)	(0x00000001 << (n))
#endif
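
/*
 * Example (LSB ordering): with run queues 5 and 9 occupied,
 * sched_whichqs == RQMASK(5) | RQMASK(9) == 0x220, and
 * ffs(sched_whichqs) - 1 == 5 picks the highest priority (lowest
 * numbered) non-empty queue, as sched_switch() does below.
 */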

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  sched_enqueue puts processes
 * into queues, sched_dequeue removes them from queues.  The running process
 * is on no queue; other processes are on a queue indexed by p->p_priority
 * divided by PPQ (4), which shrinks the 0-127 range of priorities into the
 * 32 available queues.
 */
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
sched_enqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = lwp_eprio(l) / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("sched_enqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;

	/* Insert at the tail, so equal-priority LWPs round-robin. */
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = lwp_eprio(l) / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("sched_dequeue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;

	/* If prev and next now meet at the head, the queue is empty. */
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

struct lwp *
sched_switch(struct lwp *l)
{
	const struct prochd *rq;
	int whichq;

	KASSERT(l != NULL);
	KASSERT(l->l_stat != LSRUN);

	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		if ((l->l_flag & LW_IDLE) == 0) {
			sched_enqueue(l);
		}
	}

	if (sched_whichqs == 0) {
		return NULL;
	}

#ifdef __HAVE_BIGENDIAN_BITOPS
	for (whichq = 0; ; whichq++) {
		if ((sched_whichqs & RQMASK(whichq)) != 0) {
			break;
		}
	}
#else
	whichq = ffs(sched_whichqs) - 1;
#endif
	rq = &sched_qs[whichq];
	return rq->ph_link;
}

#endif /* !defined(__HAVE_MD_RUNQUEUE) */

/* Dummy */
void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

/* SysCtl */

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, KERN_SCHED, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_KERN, KERN_SCHED, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "ccpu",
		SYSCTL_DESCR("Scheduler exponential decay value"),
		NULL, 0, &ccpu, 0,
		CTL_KERN, KERN_SCHED, CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	struct prochd *ph;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		first = 1;
		ph = &sched_qs[i];
		for (l = ph->ph_link; l != (void *)ph; l = l->l_forw) {
			if (first) {
				(*pr)("%c%d",
				    (sched_whichqs & RQMASK(i))
				    ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */
#undef RQMASK