/*	$NetBSD: kern_synch.c,v 1.101.2.17 2002/08/01 02:46:21 nathanw Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.101.2.17 2002/08/01 02:46:21 nathanw Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];		/* run queues */
__volatile u_int32_t sched_whichqs;		/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE];	/* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

__inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}
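
/*
 * Worked example of the timing above (illustrative, assuming the common
 * hz = 100): rrticks = hz / 10 = 10, so hardclock() calls roundrobin()
 * every 10 hardclock ticks, i.e. every 100ms.
 */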

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
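
/*
 * Numerical check of the above (an illustrative worked case, not part of
 * the proof): for loadav = 2, b = 4 and decay = b/(b+1) = 0.8.  The table
 * gives power = 10.32, and 0.8 ** 10.32 = exp(10.32 * ln(0.8))
 * = exp(-2.30) ~= .1, as required.
 */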

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
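
/*
 * Worked example of the fixed-point macros above (illustrative): with a
 * load average of 1.0, averunnable.ldavg[0] == FSCALE, so
 *	loadfac = loadfactor(FSCALE) = 2 * FSCALE
 *	decay_cpu(loadfac, cpu) = (2 * FSCALE * (cpu)) / (3 * FSCALE)
 * i.e. p_estcpu is scaled by 2/3 (b/(b+1) with b = 2) on each pass of
 * schedcpu().
 */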

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
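
/*
 * Illustrative check of the 95%-in-60-seconds figure: schedcpu()
 * multiplies p_pctcpu by ccpu once per second, so after 60 seconds the
 * remaining fraction is exp(-1/20) ** 60 = exp(-3) ~= 0.05; about 95%
 * of the old value has been forgotten.
 */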

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, s1;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	for (l = LIST_FIRST(&alllwp); l != NULL; l = LIST_NEXT(l, l_list)) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p = l->l_proc;
		l->l_swtime++;
		if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
		    l->l_stat == LSSUSPENDED)
			l->l_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (l->l_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		l->l_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		SCHED_LOCK(s1);
		resetpriority(l);
		if (l->l_priority >= PUSER) {
			if (l->l_stat == LSRUN &&
			    (l->l_flag & L_INMEM) &&
			    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
				remrunqueue(l);
				l->l_priority = l->l_usrpri;
				setrunqueue(l);
			} else
				l->l_priority = l->l_usrpri;
		}
		SCHED_UNLOCK(s1);
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	unsigned int newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if (l->l_slptime > 5 * loadfac)
		p->p_estcpu = 0; /* XXX NJWLWP */
	else {
		l->l_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --l->l_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler lock is acquired.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curlwp == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else {
		*qp->sq_tailp = l;
	}
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; the scheduler lock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as LSSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, l->l_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if ((sig = CURSIG(l)) != 0) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#if	defined(DDB) && !defined(GPROF)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep ; bpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~L_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(l)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
	/*
	 * XXXNJW this is very much a kluge.
	 * revisit. a better way of preventing looping/hanging syscalls like
	 * wait4() and _lwp_wait() from wedging an exiting process
	 * would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && exiterr))
		return (EINTR);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}
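
/*
 * Illustrative usage sketch for ltsleep() (the softc, its sc_slock and
 * sc_busy members, and the "xclaim" wait message are placeholders, not
 * names defined in this file): callers normally re-check their wait
 * condition in a loop and pass the simple lock protecting that condition
 * as the interlock.
 */
#if 0
	int error = 0;

	simple_lock(&sc->sc_slock);
	while (sc->sc_busy) {
		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "xclaim", 0,
		    &sc->sc_slock);
		if (error != 0)
			break;
	}
	if (error == 0)
		sc->sc_busy = 1;
	simple_unlock(&sc->sc_slock);
#endif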

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, l->l_priority
	 * is always better than curpriority.
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}
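
/*
 * Illustrative counterpart to the ltsleep() sketch above (sc and sc_busy
 * are the same placeholders): the waking side updates the condition under
 * the interlock and then calls wakeup() on the same identifier the sleeper
 * passed to ltsleep().
 */
#if 0
	simple_lock(&sc->sc_slock);
	sc->sc_busy = 0;
	simple_unlock(&sc->sc_slock);
	wakeup(&sc->sc_busy);
#endif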

void
sched_wakeup(void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make the highest priority process sleeping on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any LSSLEEP process higher priority than the highest
	 * priority LSSTOP process.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct lwp *l = curlwp;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */

void
preempt(struct lwp *newl)
{
	struct lwp *l = curlwp;
	int r, s;

	if (l->l_flag & L_SA) {
		SCHED_LOCK(s);
		l->l_priority = l->l_usrpri;
		l->l_stat = LSRUN;
		setrunqueue(l);
		l->l_proc->p_stats->p_ru.ru_nivcsw++;
		r = mi_switch(l, newl);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
		if (r != 0)
			sa_preempt(l);
	} else {
		SCHED_LOCK(s);
		l->l_priority = l->l_usrpri;
		l->l_stat = LSRUN;
		setrunqueue(l);
		l->l_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch(l, newl);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	}

}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "new" if non-NULL, otherwise let cpu_switch choose
 * the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *new)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
#if defined(MULTIPROCESSOR)
	int hold_count;
#endif
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

#if defined(MULTIPROCESSOR)
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	if (p->p_flag & P_BIGLOCK)
		hold_count = spinlock_release_all(&kernel_lock);
#endif

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(p);
#endif

	/*
	 * Pick a new current process and switch to it.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (new == NULL) {
		retval = cpu_switch(l);
	} else {
		cpu_preempt(l, new);
		retval = 0;
	}

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

#if defined(MULTIPROCESSOR)
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	if (p->p_flag & P_BIGLOCK)
		spinlock_acquire_count(&kernel_lock, hold_count);
#endif

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable");
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}
	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (l->l_priority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * This is not exactly right.  Since l->l_cpu persists
		 * across a context switch, this gives us some sort
		 * of processor affinity.  But we need to figure out
		 * at what point it's better to reschedule on a different
		 * CPU than the last one.
		 */
		need_resched((l->l_cpu != NULL) ? l->l_cpu : curcpu());
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	if (newpriority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * Same applies as in setrunnable() above.
		 */
		need_resched((l->l_cpu != NULL) ? l->l_cpu : curcpu());
	}
}
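
/*
 * Worked example of the formula above (illustrative; the exact values of
 * PUSER, NICE_WEIGHT, NZERO and MAXPRI come from the scheduler headers):
 * with p_nice == NZERO and p_estcpu == 0 the result is just PUSER, the
 * best user-mode priority.  Every point of accumulated p_estcpu then
 * makes the value larger (i.e. worse), and each unit of positive nice
 * adds NICE_WEIGHT more, with the total clamped to MAXPRI.
 */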

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_list)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The cpu usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}

void
suspendsched()
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	for (l = LIST_FIRST(&alllwp); l != NULL; l = LIST_NEXT(l, l_list)) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with processes on
			 * other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}