/*	$NetBSD: kern_synch.c,v 1.101.2.12 2002/04/02 00:16:00 nathanw Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.101.2.12 2002/04/02 00:16:00 nathanw Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/lwp.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];		/* run queues */
__volatile u_int32_t sched_whichqs;		/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE];	/* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;
#if defined(MULTIPROCESSOR)
struct lock kernel_lock;
#endif

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

__inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curproc != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... ;
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1;
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
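
/*
 * Illustrative example (a sketch, assuming the usual FSHIFT == 11 and
 * therefore FSCALE == 2048): for a load average of 1.0 the fixed-point
 * loadav is 2048, loadfactor() gives 4096, and decay_cpu() scales
 * p_estcpu by 4096 / (4096 + 2048) == 2/3 on each pass of schedcpu().
 * After 5 * loadav == 5 seconds the estimate has been multiplied by
 * (2/3)^5 =~ 0.13, i.e. roughly the intended 90% decay.
 */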

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
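
/*
 * Worked check of the 60-second claim above (illustrative only): ccpu is
 * exp(-1/20) in fixed point, and schedcpu() multiplies p_pctcpu by it once
 * per second, so after 60 seconds the old contribution has been scaled by
 * exp(-60/20) == exp(-3) =~ 0.05; i.e. about 95% of it has been forgotten.
 */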

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, s1;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	for (l = LIST_FIRST(&alllwp); l != NULL; l = LIST_NEXT(l, l_list)) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p = l->l_proc;
		l->l_swtime++;
		if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
		    l->l_stat == LSSUSPENDED)
			l->l_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (l->l_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		l->l_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		SCHED_LOCK(s1);
		resetpriority(l);
		if (l->l_priority >= PUSER) {
			if (l->l_stat == LSRUN &&
			    (l->l_flag & L_INMEM) &&
			    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
				remrunqueue(l);
				l->l_priority = l->l_usrpri;
				setrunqueue(l);
			} else
				l->l_priority = l->l_usrpri;
		}
		SCHED_UNLOCK(s1);
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	unsigned int newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if (l->l_slptime > 5 * loadfac)
		p->p_estcpu = 0;	/* XXX NJWLWP */
	else {
		l->l_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --l->l_slptime)
			newcpu = (int)decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * The interlock is held until the scheduler_slock is acquired.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
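/*
 * Illustrative caller pattern (a hypothetical sketch, not code from this
 * file): a driver that protects its softc with a simple lock and waits
 * for a condition might do
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "hypwait",
 *		    0, &sc->sc_slock);
 *		if (error)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 *
 * Here `sc', `sc_slock', `sc_busy', and the wait message are made-up
 * names; without PNORELOCK the interlock is re-locked before ltsleep()
 * returns, so the condition is always re-checked under the lock.
 */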
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct lwp *l = curproc;
	struct proc *p = l->l_proc;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curproc == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

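	/*
	 * Queue insertion (descriptive note): the sleep queue for this wait
	 * channel is a singly linked list threaded through l_forw, with
	 * sq_tailp pointing at the link field of the last LWP.  We append
	 * ourselves, then advance sq_tailp to our own l_forw and zero it to
	 * terminate the list.
	 */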
	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else {
		*qp->sq_tailp = l;
	}
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as LSSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, l->l_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if ((sig = CURSIG(l)) != 0) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#if defined(DDB) && !defined(GPROF)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep ; bpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~L_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(l)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
	/*
	 * XXXNJW: this is very much a kluge; revisit.  A better way of
	 * preventing looping/hanging syscalls like wait4() and _lwp_wait()
	 * from wedging an exiting process would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && exiterr))
		return (EINTR);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

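	/*
	 * Descriptive note: walk the hashed sleep queue for our wait channel
	 * until we find the link field that points at us, unhook ourselves
	 * from it, and pull the tail pointer back if we were the last entry.
	 */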
	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, l->l_priority
	 * is always better than curpriority.
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any LSSLEEP process higher than the highest priority
	 * LSSTOP process.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct lwp *l = curproc;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If an LWP is supplied,
 * we switch to that LWP.  Otherwise, we use the normal process selection
 * criteria.
 */

void
preempt(struct lwp *newl)
{
	struct lwp *l = curproc;
	int r, s;

	if (l->l_flag & L_SA) {
		SCHED_LOCK(s);
		l->l_priority = l->l_usrpri;
		l->l_stat = LSRUN;
		setrunqueue(l);
		l->l_proc->p_stats->p_ru.ru_nivcsw++;
		r = mi_switch(l, newl);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
		if (r != 0)
			sa_upcall(l, SA_UPCALL_PREEMPTED, l, NULL, 0, NULL);
	} else {
		SCHED_LOCK(s);
		l->l_priority = l->l_usrpri;
		l->l_stat = LSRUN;
		setrunqueue(l);
		l->l_proc->p_stats->p_ru.ru_nivcsw++;
		mi_switch(l, newl);
		SCHED_ASSERT_UNLOCKED();
		splx(s);
	}
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "new" if non-NULL, otherwise let cpu_switch choose
 * the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *new)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
#if defined(MULTIPROCESSOR)
	int hold_count;
#endif
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

#if defined(MULTIPROCESSOR)
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	if (p->p_flag & P_BIGLOCK)
		hold_count = spinlock_release_all(&kernel_lock);
#endif

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	/*
	 * Pick a new current process and switch to it.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (new == NULL) {
		retval = cpu_switch(l);
	} else {
		cpu_preempt(l, new);
		retval = 0;
	}

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

#if defined(MULTIPROCESSOR)
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	if (p->p_flag & P_BIGLOCK)
		spinlock_acquire_count(&kernel_lock, hold_count);
#endif

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable");
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}
	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (l->l_priority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * This is not exactly right.  Since l->l_cpu persists
		 * across a context switch, this gives us some sort
		 * of processor affinity.  But we need to figure out
		 * at what point it's better to reschedule on a different
		 * CPU than the last one.
		 */
		need_resched((l->l_cpu != NULL) ? l->l_cpu : curcpu());
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

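	/*
	 * Illustrative note (assuming the usual NetBSD values PUSER == 50,
	 * NICE_WEIGHT == 2, NZERO == 20, MAXPRI == 127): an un-niced process
	 * with p_estcpu == 30 gets a user priority of 50 + 30 == 80, and the
	 * result is always clamped to MAXPRI.  Larger numbers are worse
	 * priorities.
	 */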
	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	if (newpriority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * Same applies as in setrunnable() above.
		 */
		need_resched((l->l_cpu != NULL) ? l->l_cpu : curcpu());
	}
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_list)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The cpu usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}

void
suspendsched()
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	for (l = LIST_FIRST(&alllwp); l != NULL; l = LIST_NEXT(l, l_list)) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with LWPs running on
			 * other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}