/*	$NetBSD: kern_synch.c,v 1.166.2.13 2007/01/27 01:14:54 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.13 2007/01/27 01:14:54 ad Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t sched_mutex;			/* global sched state mutex */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}
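
/*
 * Illustrative numbers only, assuming a typical hz of 100: hz / 10 == 10,
 * so rrticks would be 10 and roundrobin() would run once every 10 hardclock
 * ticks, i.e. every 100ms as described in the comment above.
 */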

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
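
/*
 * Worked numbers as a sanity check only (they assume the traditional values
 * RUNQUE_NQS == 32 and PRIO_MAX == 20):
 *	PPQ        = 128 / 32           = 4
 *	ESTCPU_MAX = (2 * 20 - 4) << 11 = 36 << 11 = 73728
 */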

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1       2       3       4
 *	power:  5.68    10.32   14.94   19.55
 */
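
/*
 * A quick worked example of the relation above (illustrative only): with a
 * load average of 1, b == 2 and decay == 2/3.  The table gives power == 5.68,
 * and indeed (2/3)^5.68 =~ 0.1, i.e. about 90% of p_estcpu is forgotten after
 * roughly 5 * loadavg seconds of decay.
 */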

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
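
/*
 * Rough example of the early-out above (illustrative, not normative): the
 * load average is a fixed-point value scaled by FSCALE, so with a load
 * average of 1 we have loadfac == 2 * FSCALE and the test reduces to
 * n >= 14.  In other words, an LWP that has slept for 14 or more schedcpu()
 * periods (seconds) is simply treated as having decayed to zero.
 */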

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
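
/*
 * Back-of-the-envelope check (illustrative only, assuming FSHIFT == 11 so
 * that FSCALE == 2048): FSCALE - ccpu =~ 100.  With stathz == 100, a fully
 * busy process accumulates about 100 p_cpticks per second, so the per-second
 * p_pctcpu update in schedcpu() below converges on roughly FSCALE, i.e. 100%.
 */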

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100) ?
			    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			    100 * (((fixpt_t)p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * ltsleep: see mtsleep() for comments.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
mtsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, priority & PNORELOCK);

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (mtx != NULL) {
		LOCK_ASSERT(mutex_owned(mtx));
		mutex_exit(mtx);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (mtx != NULL && (priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
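
/*
 * The sketch below is purely illustrative of the mtsleep()/wakeup() pattern
 * described above; "sc", its members and the "mysleep" wmesg are hypothetical
 * and do not exist elsewhere in the tree:
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_busy) {
 *		(sc_lock is dropped while asleep and reacquired before
 *		 mtsleep() returns, since PNORELOCK is not passed)
 *		error = mtsleep(&sc->sc_busy, PWAIT | PCATCH, "mysleep",
 *		    0, &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	...
 *	mutex_exit(&sc->sc_lock);
 *
 * The waking side would clear sc->sc_busy with sc_lock held and then call
 * wakeup(&sc->sc_busy).
 */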

/*
 * sched_pause:
 *
 *	General sleep call for situations where a wake-up is not expected.
 */
int
sched_pause(const char *wmesg, boolean_t intr, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	return sleepq_unblock(timo, intr);
}

void
sa_awaken(struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, NULL));

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is boolean.  preempt() calls made
 * on the way back to userspace pass 0; "voluntary" preemptions in e.g.
 * uiomove() pass 1.  It is used to tell the SA subsystem that the LWP is
 * not yet finished in the kernel.
 */
void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	r = mi_switch(l, NULL);

	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.  Switch to the LWP given
 * by newl if it is non-NULL, otherwise let cpu_switch() choose the next LWP.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	KERNEL_UNLOCK_ALL(l, &hold_count);

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock.
	 */
	splx(oldspl);
	KERNEL_LOCK(hold_count, l);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&l->l_sigpend.sp_set, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_proc->p_sa)
		sa_awaken(l);

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	if (l->l_priority != newpriority)
		lwp_changepri(l, newpriority);
}
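
/*
 * Example of the calculation above, with hypothetical numbers (assuming the
 * traditional PUSER of 50 and NZERO of 20): a process with a p_estcpu of
 * (4 << ESTCPU_SHIFT) and the default nice value would get
 * 50 + 4 + 2 * (20 - 20) = 54, while running at nice 10 would add another
 * 2 * 10 = 20 to that.
 */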

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_exit(&p->p_stmutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks that
			 * they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_exit(&parent->p_stmutex);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
	static const uint8_t kpri_tab[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49,  8,  8,  9,  9, 10, 10,
		11, 11, 12, 12, 13, 14, 14, 15,
		15, 16, 16, 17, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 22, 23, 23,
		24, 24, 25, 26, 26, 27, 27, 28,
		28, 29, 29, 30, 30, 31, 32, 32,
		33, 33, 34, 34, 35, 35, 36, 36,
		37, 38, 38, 39, 39, 40, 40, 41,
		41, 42, 42, 43, 44, 44, 45, 45,
		46, 46, 47, 47, 48, 48, 49, 49,
	};

	return kpri_tab[l->l_priority];
}
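
/*
 * Reading the table above: entries 0-49 map to themselves, so an LWP that
 * already holds a kernel priority keeps it, while the user priority range
 * (50-127) is squeezed into 8-49 so that an LWP about to sleep competes in
 * the kernel range.  (Descriptive note only; the table itself is the
 * authoritative mapping.)
 */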

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  sched_whichqs tells which
 * of the 32 queues in sched_qs have processes in them.  setrunqueue() puts
 * processes into queues, remrunqueue() removes them from queues.  The
 * running process is on no queue; other processes are on a queue related to
 * l->l_priority, divided by PPQ (4) to shrink the 0-127 range of priorities
 * into the 32 available queues.
 */
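
/*
 * For instance (illustrative only): an LWP with l_priority 54 lands on
 * sched_qs[54 / PPQ] == sched_qs[13], and setrunqueue() below sets bit
 * RQMASK(13) in sched_whichqs to mark that queue non-empty.
 */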
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */