/*	$NetBSD: kern_synch.c,v 1.55 1999/02/23 02:56:03 ross Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <sys/sched.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#define	NICE_WEIGHT	2		/* priorities per nice level */
#define	PPQ	(128 / NQS)		/* priorities per queue */

#define	ESTCPULIM(e) min((e), NICE_WEIGHT * PRIO_MAX - PPQ)
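
/*
 * For concreteness (assuming PRIO_MAX == 20 and NQS == 32 from the
 * standard headers): PPQ == 128 / 32 == 4, so ESTCPULIM clamps
 * p_estcpu at 2 * 20 - 4 == 36, which keeps the priority computed in
 * resetpriority() below at or under MAXPRI even at nice +20.
 */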

#include <machine/cpu.h>

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

void roundrobin __P((void *));
void schedcpu __P((void *));
void updatepri __P((struct proc *));
void endtsleep __P((void *));

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(arg)
	void *arg;
{

	need_resched();
	timeout(roundrobin, NULL, hz / 10);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
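
/*
 * Worked example (a sketch, assuming FSHIFT == 11, i.e. FSCALE == 2048):
 * with a steady load average of 1.0, loadfac == 2 * 2048 == 4096, so
 *	decay_cpu(4096, cpu) == 4096 * cpu / 6144 == cpu * 2 / 3
 * i.e. p_estcpu is scaled by 2/3 each second; (2/3)**5.68 ~= .1,
 * matching the "power: 5.68" entry for loadav 1 in the table above.
 */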

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
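
/*
 * Worked example: ccpu/FSCALE == exp(-1/20) ~= 0.95122942, so with no
 * new ticks p_pctcpu is scaled by exp(-1/20)**60 == exp(-3) ~= 0.0498
 * over 60 seconds, i.e. about 95% of the old value decays away, as
 * promised above.
 */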

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	wakeup((caddr_t)&lbolt);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		KASSERT(profhz);
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (profhz == 100)?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / profhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / profhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
#if defined(UVM)
	uvm_meter();
#else
	vmmeter();
#endif
	timeout(schedcpu, (void *)0, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
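
/*
 * For example, LOOKUP(0xf0123456) == (0xf0123456 >> 8) & 127 ==
 * 0xf01234 & 0x7f == 0x34, so a process sleeping on that identifier
 * is queued on slpque[52].
 */
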
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[TABLESIZE];

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted, and EINTR is returned if it
 * should be interrupted by the signal.
 */
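/*
 * A typical (hypothetical) caller sleeps on the address of the datum
 * it is waiting for, rechecking the condition on each wakeup:
 *
 *	while (sc->sc_busy) {
 *		error = tsleep((caddr_t)&sc->sc_busy, PZERO | PCATCH,
 *		    "devbsy", hz);
 *		if (error)
 *			return (error);	(EWOULDBLOCK, EINTR or ERESTART)
 *	}
 *	sc->sc_busy = 1;
 *
 * The names sc, sc_busy and "devbsy" are illustrative only.
 */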
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	const char *wmesg;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	int sig, catch = priority & PCATCH;
	extern int cold;
	void endtsleep __P((void *));

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		return (0);
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();

#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("tsleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	if (timo)
		timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if ((p->p_sigacts->ps_sigact[sig].sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}

/*
 * Short-term, non-interruptible sleep.
 */
void
sleep(ident, priority)
	void *ident;
	int priority;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	extern int cold;

#ifdef DIAGNOSTIC
	if (priority > PZERO) {
		printf("sleep called with priority %d > PZERO, wchan: %p\n",
		    priority, ident);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("sleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_priority = priority;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendsleep ; bpendsleep:");
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	curpriority = p->p_usrpri;
	splx(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
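/*
 * The producer half of the (hypothetical) example above tsleep():
 *
 *	sc->sc_busy = 0;
 *	wakeup((caddr_t)&sc->sc_busy);
 *
 * Every process sleeping on the identifier is awakened, which is why
 * sleepers must re-test their wait condition when tsleep() returns.
 */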
void
wakeup(ident)
	register void *ident;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM)
					setrunqueue(p);
				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority.
				 */
				if ((p->p_flag & P_INMEM) == 0)
					wakeup((caddr_t)&proc0);
				else
					need_resched();
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
	splx(s);
}

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	struct timeval tv;

#ifdef DEBUG
	if (p->p_simple_locks) {
		printf("p->p_simple_locks %d\n", p->p_simple_locks);
#ifdef LOCKDEBUG
		simple_lock_dump();
#endif
		panic("sleep: holding simple lock");
	}
#endif
	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than autonicetime seconds, lower the priority to give
	 * others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max)
			psignal(p, SIGKILL);
		else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(p);
	}

	/*
	 * Pick a new current process and record its start time.
	 */
#if defined(UVM)
	uvmexp.swtch++;
#else
	cnt.v_swtch++;
#endif
	cpu_switch(p);
	microtime(&runtime);
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_siglist, p->p_xstat);
			p->p_sigcheck = 1;
		}
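		/* FALLTHROUGH */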
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_priority < curpriority)
		need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curpriority)
		need_resched();
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  The formula for computing
 * priorities (in resetpriority() above) will compute a different value
 * each time p_estcpu increases.  The cpu usage estimator ramps up
 * quite quickly when the process is running (linearly), and decays
 * away exponentially, at a rate which is proportionally slower when
 * the system is busy.  The basic principle is that the system will
 * 90% forget that the process used a lot of CPU time in 5 * loadav
 * seconds.  This causes the system to favor processes which haven't
 * run much recently, and to round-robin among other processes.
 */
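
/*
 * Worked example (assuming PUSER == 50, NZERO == 20 and MAXPRI == 127
 * from the standard headers): a nice-0 process with p_estcpu == 10 gets
 * p_usrpri == 50 + 10 + 2 * (20 - 20) == 60; at the ESTCPULIM cap of 36
 * and nice +20 it reaches 50 + 36 + 40 == 126, still within MAXPRI.
 */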

void
schedclk(p)
	struct proc *p;
{
	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	resetpriority(p);
	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
}