/*	$NetBSD: kern_synch.c,v 1.169 2006/11/01 09:32:52 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.169 2006/11/01 09:32:52 yamt Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * Sleep queues.
 *
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define	SLPQUE_TABLESIZE	128
#define	SLPQUE_LOOKUP(x)	(((u_long)(x) >> 8) & (SLPQUE_TABLESIZE - 1))

#define	SLPQUE(ident)	(&sched_slpque[SLPQUE_LOOKUP(ident)])
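
/*
 * A worked example of the bucket computation (the address below is
 * illustrative only): for ident == 0xc1234a60,
 *
 *	0xc1234a60 >> 8        == 0x00c1234a
 *	0x00c1234a & (128 - 1) == 0x4a        -> sched_slpque[74]
 *
 * i.e. identifiers sharing bits 8-14 hash to the same sleep queue and
 * are disambiguated by comparing l_wchan at wakeup time.
 */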

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];		/* run queues */
volatile uint32_t sched_whichqs;		/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE];	/* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

inline void sa_awaken(struct lwp *);
inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;


/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
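
/*
 * With the stock values (RUNQUE_NQS == 32, PRIO_MAX == 20) these work
 * out to, for example:
 *
 *	PPQ        == 128 / 32           == 4
 *	ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11 == 73728
 *
 * i.e. the scaled-down estcpu term used in resetpriority() is capped at
 * NICE_WEIGHT * PRIO_MAX - PPQ == 36 priority levels.
 */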

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 1 + x/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     (-1 < x < 1);
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
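
/*
 * A quick numerical check of the approximation above (purely
 * illustrative, not used by the code): with loadavg == 2, b == 4 and
 * decay == 4/5 == 0.8, so after 5 * loadavg == 10 applications
 *
 *	0.8 ** 10 =~ 0.107
 *
 * which is roughly the desired factor of .1.
 */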

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
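
/*
 * For example (illustrative numbers only, assuming the load average is
 * stored as a fixpt_t scaled by FSCALE): with a load average of 1.0,
 * loadfac == 2 * FSCALE, so the early-exit test above fires once
 *
 *	n << FSHIFT >= 7 * 2 * FSCALE, i.e. n >= 14 seconds asleep,
 *
 * and the whole estcpu history is simply dropped instead of being
 * decayed one step per second of sleep.
 */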

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
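
/*
 * In concrete terms (assuming the stock FSHIFT of 11, FSCALE == 2048):
 * ccpu == (fixpt_t)(0.95122... * 2048) == 1948, so each schedcpu() pass
 * computes roughly
 *
 *	p_pctcpu = p_pctcpu * 1948 / 2048
 *
 * which sheds about 4.9% of the old value per second, i.e. ~95% per
 * minute as advertised above.
 */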

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg __unused)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, minslp;
	int clkhz;

	schedcpu_ticks++;

	proclist_lock_read();
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		splx(s);	/* Done with the process CPU ticks update */
		SCHED_LOCK(s);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_slptime > 1)
				continue;
			resetpriority(l);
			if (l->l_priority >= PUSER) {
				if (l->l_stat == LSRUN &&
				    (l->l_flag & L_INMEM) &&
				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
					remrunqueue(l);
					l->l_priority = l->l_usrpri;
					setrunqueue(l);
				} else
					l->l_priority = l->l_usrpri;
			}
		}
		SCHED_UNLOCK(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until sched_lock is acquired.  The interlock
 * will be locked before returning back to the caller unless the
 * PNORELOCK flag is specified, in which case the interlock will
 * always be unlocked upon return.
 */
int
ltsleep(volatile const void *ident, int priority, const char *wmesg, int timo,
    volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	struct slpque *qp;
	struct sadata_upcall *sau;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curlwp == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(l, 1, 0);
#endif

	/*
	 * XXX We need to allocate the sadata_upcall structure here,
	 * XXX since we can't sleep while waiting for memory inside
	 * XXX sa_upcall().  It would be nice if we could safely
	 * XXX allocate the sadata_upcall structure on the stack, here.
	 */
	if (l->l_flag & L_SA) {
		sau = sadata_upcall_alloc(0);
	} else {
		sau = NULL;
	}

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else {
		*qp->sq_tailp = l;
	}
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; sched_lock is held, so a
	 * thread can't get in to do wakeup() before we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as LSSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, l->l_wchan will be NULL upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if (((sig = CURSIG(l)) != 0) ||
		    ((p->p_flag & P_WEXIT) && p->p_nlwps > 1)) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, sau, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#ifdef KERN_SYNCH_BPENDTSLEEP_LABEL
	/*
	 * XXX
	 * The gcc4 optimizer will duplicate this asm statement on some
	 * architectures, causing a multiple symbol definition error in gas.
	 * The kernel Makefile is set up to use -fno-reorder-blocks if
	 * this option is set.
	 */
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~(L_TIMEOUT|L_CANCELLED);
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(l, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);

	if (catch) {
		const int cancelled = l->l_flag & L_CANCELLED;
		l->l_flag &= ~L_CANCELLED;
		if (sig != 0 || (sig = CURSIG(l)) != 0 || cancelled) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(l, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			/*
			 * If this sleep was canceled, don't let the syscall
			 * restart.
			 */
			if (cancelled ||
			    (SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
				return (EINTR);
			return (ERESTART);
		}
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(l, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);

	/*
	 * XXXNJW: this is very much a kluge; revisit.  A better way of
	 * preventing looping/hanging syscalls like wait4() and _lwp_wait()
	 * from wedging an exiting process would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && p->p_nlwps > 1 && exiterr))
		return (EINTR);
	return (0);
}
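
/*
 * A minimal sketch of the interlocked-sleep pattern described above
 * ltsleep(), kept out of compilation; example_lock, example_ready,
 * example_wait() and example_post() are hypothetical names, not kernel
 * interfaces.
 */
#if 0
static struct simplelock example_lock = SIMPLELOCK_INITIALIZER;
static int example_ready;

/* Sleep until example_post() signals readiness; interruptible by signals. */
static int
example_wait(void)
{
	int error = 0;

	simple_lock(&example_lock);
	while (!example_ready && error == 0) {
		/*
		 * The interlock is dropped only once sched_lock is held,
		 * so a wakeup cannot slip between the test above and the
		 * sleep; since PNORELOCK is not passed, the interlock is
		 * re-taken before ltsleep() returns.
		 */
		error = ltsleep(&example_ready, PWAIT | PCATCH,
		    "exwait", 0, &example_lock);
	}
	simple_unlock(&example_lock);
	return (error);
}

/* Make the condition true and wake all waiters. */
static void
example_post(void)
{

	simple_lock(&example_lock);
	example_ready = 1;
	wakeup(&example_ready);
	simple_unlock(&example_lock);
}
#endif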

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

inline void
sa_awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_proc->p_sa)
		sa_awaken(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority on the last CPU on
	 * which it ran.
	 *
	 * XXXSMP See affinity comment in resched_proc().
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(volatile const void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher than the highest priority SSTOP
	 * process.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is boolean.  preempt() calls
 * made when returning to userspace pass 0; "voluntary" preemptions in
 * e.g. uiomove() pass 1.  It indicates to the SA subsystem that the LWP
 * is not yet finished in the kernel.
 */

void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r, s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nivcsw++;
	r = mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "new" if non-NULL, otherwise let cpu_switch() choose
 * the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
	int hold_count;
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	hold_count = KERNEL_LOCK_RELEASE_ALL();

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Switch to the new current process.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (newl == NULL) {
		retval = cpu_switch(l, NULL);
	} else {
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max) {
			psignal(p, SIGKILL);
		} else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime &&
	    kauth_cred_geteuid(p->p_cred) && p->p_nice == NZERO) {
		SCHED_LOCK(s);
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
		SCHED_UNLOCK(s);
	}

	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	KERNEL_LOCK_ACQUIRE_COUNT(hold_count);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}

static inline void
resched_proc(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
		/* FALLTHROUGH */
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}

	if (l->l_proc->p_sa)
		sa_awaken(l);

	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else
		resched_proc(l, l->l_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	resched_proc(l, l->l_usrpri);
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}
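
/*
 * Putting numbers to the comment above schedclock() (assuming the stock
 * values PUSER == 50, NZERO == 20, NICE_WEIGHT == 2, ESTCPU_SHIFT == 11
 * and PPQ == 4): each schedclock() tick adds 1 << 11 to p_estcpu, and
 * resetpriority() then computes, for an un-niced process that has
 * accumulated, say, 10 such ticks:
 *
 *	l_usrpri = 50 + ((10 << 11) >> 11) + 2 * (20 - 20) == 60
 *
 * so the LWP has drifted from run queue 50/4 == 12 down to queue
 * 60/4 == 15 as it used CPU.
 */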

void
suspendsched(void)
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	LIST_FOREACH(l, &alllwp, l_list) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with processes on
			 * other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu) {
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	}
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  setrunqueue() puts processes
 * into queues, remrunqueue() removes them from queues.  The running process
 * is on no queue; other processes are on a queue related to p->p_priority,
 * divided by PPQ (4) to shrink the 0-127 range of priorities into the 32
 * available queues.
 */

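/*
 * For example (with PPQ == 4): an LWP at l_priority 50 (PUSER) lands on
 * sched_qs[50 / 4] == sched_qs[12], and setrunqueue() marks that queue
 * non-empty by setting RQMASK(12) in sched_whichqs; the MD cpu_switch()
 * typically scans the bitmap for the lowest-numbered (best priority)
 * non-empty queue.
 */
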
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
#ifdef DIAGNOSTIC
	if ((sched_whichqs & RQMASK(whichq)) == 0)
		panic("remrunqueue: bit %d not set", whichq);
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */