/*	$NetBSD: kern_synch.c,v 1.78 2000/06/10 18:44:44 sommerfeld Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];		/* run queues */
__volatile u_int32_t sched_whichqs;		/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE];	/* sleep queues */

void roundrobin(void *);
void schedcpu(void *);
void updatepri(struct proc *);
void endtsleep(void *);

__inline void awaken(struct proc *);

struct callout roundrobin_ch = CALLOUT_INITIALIZER;
struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(void *arg)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	int s;

	if (curproc != NULL) {
		s = splstatclock();
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
		splx(s);
	}
	need_resched();
	callout_reset(&roundrobin_ch, hz / 10, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	  solving for factor,
 *	  ln(factor) =~ (-2.30/5*loadav), or
 *	  factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	      exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	  solving for power,
 *	  power*ln(b/(b+1)) =~ -2.30, or
 *	  power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
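
/*
 * Worked example, assuming the usual FSHIFT of 11 (FSCALE == 2048):
 * with a load average of 1.0, averunnable.ldavg[0] == FSCALE, so
 * loadfactor() yields 4096 and
 *	decay_cpu(4096, cpu) == (4096 * cpu) / (4096 + 2048) == (2/3) * cpu,
 * i.e. decay == 2*loadavg / (2*loadavg + 1) == 2/3.  Applied once per
 * second by schedcpu(), (2/3)^5.68 =~ 0.1, matching the power table
 * above for loadav 1.
 */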

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
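
/*
 * Sanity check of the shift above, assuming the usual FSHIFT of 11
 * (FSCALE == 2048) and a 100 Hz statistics clock: each second schedcpu()
 * first scales p_pctcpu by ccpu (exp(-1/20)) and then adds
 * p_cpticks << (FSHIFT - CCPU_SHIFT), i.e. simply p_cpticks.  A process
 * that used the whole second contributes ~100 ticks, and since
 * 1 - exp(-1/20) ~= 100/2048, the sum converges near FSCALE, so
 * p_pctcpu/FSCALE approximates the fraction of CPU consumed.
 */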

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		        << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu = p->p_estcpu;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int)decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler_slock is held.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
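/*
 * A minimal usage sketch (the "sc" softc and its sc_lock and sc_busy
 * members are purely illustrative, not part of this file):
 *
 *	simple_lock(&sc->sc_lock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "hwbusy",
 *		    0, &sc->sc_lock);
 *		if (error != 0) {
 *			simple_unlock(&sc->sc_lock);
 *			return (error);
 *		}
 *	}
 *	sc->sc_busy = 1;
 *	simple_unlock(&sc->sc_lock);
 *
 * The interlock is dropped only after the process is on the sleep queue,
 * and (unless PNORELOCK is passed) re-taken before ltsleep() returns, so
 * the wait condition can be re-checked without losing a wakeup.
 */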
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct proc *p = curproc;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
#if 0 /* XXXSMP */
	int dobiglock;
#endif

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curproc == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (p == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

#if 0 /* XXXSMP */
	dobiglock = (p->p_flag & P_BIGLOCK) != 0;
#endif

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	s = splhigh();			/* XXXSMP: SCHED_LOCK(s) */

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (p->p_stat != SONPROC)
		panic("ltsleep: p_stat %d != SONPROC", p->p_stat);
	if (p->p_back != NULL)
		panic("ltsleep: p_back != NULL");
#endif

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;

	if (timo)
		callout_reset(&p->p_tsleep_ch, timo, endtsleep, p);

	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan != NULL)
				unsleep(p);
			p->p_stat = SONPROC;
#if 0 /* XXXSMP */
			/*
			 * We're going to skip the unlock, so
			 * we don't need to relock after resume.
			 */
			dobiglock = 0;
#endif
			goto resume;
		}
		if (p->p_wchan == NULL) {
			catch = 0;
#if 0 /* XXXSMP */
			/* See above. */
			dobiglock = 0;
#endif
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;

#if 0 /* XXXSMP */
	if (dobiglock) {
		/*
		 * Release the kernel_lock, as we are about to
		 * yield the CPU.  The scheduler_slock is still
		 * held until cpu_switch() selects a new process
		 * and removes it from the run queue.
		 */
		kernel_lock_release();
	}
#endif

	/* scheduler_slock held */
	mi_switch(p);
	/* scheduler_slock held */
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif

 resume:
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	splx(s);			/* XXXSMP: SCHED_UNLOCK(s) */
#if 0 /* XXXSMP */
	if (dobiglock) {
		/*
		 * Reacquire the kernel_lock now.  We do this after
		 * we've released scheduler_slock to avoid deadlock.
		 */
		kernel_lock_acquire(LK_EXCLUSIVE);
	}
#endif
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&p->p_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((p->p_sigacts->ps_sigact[sig].sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct proc *p)
{
	struct slpque *qp;
	struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = SLPQUE(p->p_wchan))->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct proc *p)
{

	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	p->p_stat = SRUN;

	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority.
	 */
	if (p->p_flag & P_INMEM) {
		setrunqueue(p);
		need_resched();
	} else
		wakeup(&proc0);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;
	int s;

	s = splhigh();			/* XXXSMP: SCHED_LOCK(s) */

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				awaken(p);
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
	splx(s);			/* XXXSMP: SCHED_UNLOCK(s) */
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;
	struct proc *best_sleepp, **best_sleepq;
	struct proc *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	s = splhigh();			/* XXXSMP: SCHED_LOCK(s) */

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (p = *q) != NULL; q = &p->p_forw) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			if (p->p_stat == SSLEEP) {
				if (best_sleepp == NULL ||
				    p->p_priority < best_sleepp->p_priority) {
					best_sleepp = p;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    p->p_priority < best_stopp->p_priority) {
					best_stopp = p;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher than the highest priority SSTOP
	 * process.
	 */
	if (best_sleepp != NULL) {
		p = best_sleepp;
		q = best_sleepq;
	} else {
		p = best_stopp;
		q = best_stopq;
	}

	if (p != NULL) {
		p->p_wchan = NULL;
		*q = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = q;
		if (p->p_stat == SSLEEP)
			awaken(p);
	}
	splx(s);			/* XXXSMP: SCHED_UNLOCK(s) */
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	s = splstatclock();
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch(p);
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	/*
	 * XXX Switching to a specific process is not supported yet.
	 */
	if (newp != NULL)
		panic("preempt: cpu_preempt not yet implemented");

	s = splstatclock();
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch(p);
	splx(s);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splstatclock() or higher.
 */
void
mi_switch(struct proc *p)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;

	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());

	spc = &p->p_cpu->ci_schedstate;

#ifdef DEBUG
	if (p->p_simple_locks) {
		printf("p->p_simple_locks %d\n", p->p_simple_locks);
#ifdef LOCKDEBUG
		simple_lock_dump();
#endif
		panic("sleep: holding simple lock");
	}
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over the hard limit, kill it.  In any case, if it has run
	 * for longer than autonicetime seconds, reduce its priority to
	 * give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max)
			psignal(p, SIGKILL);
		else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(p);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	/*
	 * Pick a new current process and switch to it.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	cpu_switch(p);

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	microtime(&p->p_cpu->ci_schedstate.spc_runtime);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct proc *)&sched_qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{
	int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SZOMB:
	case SDEAD:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_siglist, p->p_xstat);
			p->p_sigcheck = 1;
		}
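		/* FALLTHROUGH */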
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_priority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * This is wrong.  It will work, but what really
		 * needs to happen is:
		 *
		 *	- Need to check if p is higher priority
		 *	  than the process currently running on
		 *	  the CPU p last ran on (let p_cpu persist
		 *	  after a context switch?), and preempt
		 *	  that one (or, if there is no process
		 *	  there, simply need_resched() that CPU).
		 *
		 *	- Failing that, traverse a list of
		 *	  available CPUs and need_resched() the
		 *	  CPU with the lowest priority that's
		 *	  lower than p's.
		 */
		need_resched();
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * Same applies as in setrunnable() above.
		 */
		need_resched();
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct proc *p)
{

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
}