/*	$NetBSD: kern_synch.c,v 1.166.2.12 2007/01/27 00:26:44 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.12 2007/01/27 00:26:44 ad Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
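
/*
 * With the usual PRIO_MAX of 20 and the 32 run queues described below
 * (so PPQ == 4), ESTCPU_MAX works out to 36 << ESTCPU_SHIFT; ESTCPULIM()
 * clamps the estimate there.
 */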

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	    Note that, as ps(1) mentions, this can let percentages
 *	    total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	  solving for factor,
 *	  ln(factor) =~ (-2.30/5*loadav), or
 *	  factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	      exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	  solving for power,
 *	  power*ln(b/(b+1)) =~ -2.30, or
 *	  power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
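
/*
 * As a worked example: with loadavg == 2 the computed decay is 4/5, and
 * applying it once per second for 5 * loadavg == 10 seconds multiplies
 * p_estcpu by (4/5)^10 =~ 0.107, i.e. roughly the intended 90% decay.
 */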

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

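/*
 * decay_cpu:
 *
 *	Apply one second's worth of decay: scale the CPU usage estimate by
 *	loadfac / (loadfac + FSCALE).  On 32-bit platforms, 64-bit
 *	arithmetic is avoided whenever the product cannot overflow.
 */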
static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
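
/*
 * Since schedcpu() applies ccpu once per second, sixty applications give
 * exp(-60/20) == exp(-3) =~ 0.05: 95% of p_pctcpu is forgotten after 60
 * seconds, as stated above.
 */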

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
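
/*
 * With the usual FSHIFT of 11 this makes FSHIFT == CCPU_SHIFT, so the fast
 * path in schedcpu() below adds p_cpticks to p_pctcpu unshifted when the
 * clock runs at 100Hz.
 */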

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100)?
			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			    100 * (((fixpt_t) p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			mutex_exit(&p->p_stmutex);
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1)
					resetpriority(l);
				lwp_unlock(l);
			}
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked. */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * ltsleep: see mtsleep() for comments.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
mtsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, priority & PNORELOCK);

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (mtx != NULL) {
		LOCK_ASSERT(mutex_owned(mtx));
		mutex_exit(mtx);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (mtx != NULL && (priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}

/*
 * sched_pause:
 *
 *	General sleep call for situations where a wake-up is not expected.
 */
int
sched_pause(const char *wmesg, boolean_t intr, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	return sleepq_unblock(timo, intr);
}

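/*
 * sa_awaken:
 *
 *	An SA LWP is being awoken: if it is the virtual processor's LWP and
 *	is yielding, clear its idle flag.  Called with the LWP locked.
 */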
void
sa_awaken(struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, NULL));

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is boolean.  preempt() calls made
 * when returning to userspace pass 0; "voluntary" preemptions in e.g.
 * uiomove() pass 1.  It is used to indicate to the SA subsystem that the
 * LWP is not yet finished in the kernel.
 */
void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	r = mi_switch(l, NULL);

	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	KERNEL_UNLOCK_ALL(l, &hold_count);

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock.
	 */
	splx(oldspl);
	KERNEL_LOCK(hold_count, l);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

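/*
 * resched_lwp:
 *
 *	Ask for a reschedule if the given priority is better (numerically
 *	lower) than that of whatever is running on the CPU the LWP last ran
 *	on, or on the current CPU if it has none.
 */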
static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached
		 * us while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&l->l_sigpend.sp_set, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_proc->p_sa)
		sa_awaken(l);

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run
	 * queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	if (l->l_priority != newpriority)
		lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
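
/*
 * Concretely, each schedclock() tick below adds 1 << ESTCPU_SHIFT to
 * p_estcpu, which worsens the user priority computed in resetpriority()
 * by one step; only when the priority crosses a PPQ boundary does the LWP
 * move to a different run queue.
 */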

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_exit(&p->p_smutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_exit(&parent->p_smutex);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
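	/*
	 * The first 50 entries map kernel priorities to themselves; the
	 * remaining entries compress the user priority range into 8..49.
	 */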
	static const uint8_t kpri_tab[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49,  8,  8,  9,  9, 10, 10,
		11, 11, 12, 12, 13, 14, 14, 15,
		15, 16, 16, 17, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 22, 23, 23,
		24, 24, 25, 26, 26, 27, 27, 28,
		28, 29, 29, 30, 30, 31, 32, 32,
		33, 33, 34, 34, 35, 35, 36, 36,
		37, 38, 38, 39, 39, 40, 40, 41,
		41, 42, 42, 43, 44, 44, 45, 45,
		46, 46, 47, 47, 48, 48, 49, 49,
	};

	return kpri_tab[l->l_priority];
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use a MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */