/*	$NetBSD: kern_synch.c,v 1.166.2.8 2007/01/11 22:23:00 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.8 2007/01/11 22:23:00 ad Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
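
/*
 * Worked example (illustrative only, not used by the kernel): with a
 * steady load average of 1.0 we have b == 2 and decay == 2/3.  Since
 * (2/3)^5 =~ 0.13 and (2/3)^6 =~ 0.09, roughly 5.68 applications of the
 * decay forget 90% of usage, matching the table above.  A minimal
 * userland sketch of the same fixed-point arithmetic, assuming
 * FSHIFT == 11 (FSCALE == 2048) as defined in <sys/param.h>:
 *
 *	uint32_t estcpu  = 100 << ESTCPU_SHIFT;
 *	fixpt_t  loadfac = 2 * FSCALE;		   (loadfactor of 1.0)
 *	int i;
 *
 *	for (i = 0; i < 6; i++)
 *		estcpu = (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
 *	(estcpu is now roughly 9% of its starting value)
 */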

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
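
/*
 * Illustration (assuming the load average holds at 1.0): loadfac is then
 * 2 * FSCALE, so the short-cut above fires once (n << FSHIFT) >= 14 * FSCALE,
 * i.e. an LWP that has slept for 14 or more seconds has its estimate zeroed
 * outright instead of being decayed step by step.
 */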

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
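
/*
 * For reference (a sketch, not code from this file): p_pctcpu is a
 * fixed-point fraction of one CPU, so consumers such as ps(1) convert a
 * snapshot to a percentage roughly as
 *
 *	pct = pctcpu * 100.0 / FSCALE;	   (e.g. 1949/2048 -> ~95.2%)
 */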

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		runtm = 0;
		mutex_enter(&p->p_smutex);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				psignal(p, SIGKILL);
			else {
				psignal(p, SIGXCPU);
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		/*
		 * p_pctcpu is only for ps.
		 */
		mutex_enter(&p->p_stmutex);
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		mutex_exit(&p->p_stmutex);
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			if (l->l_slptime <= 1)
				resetpriority(l);
			lwp_unlock(l);
		}
		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked. */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * ltsleep: see mtsleep() for comments.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
mtsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, priority & PNORELOCK);

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (mtx != NULL) {
		LOCK_ASSERT(mutex_owned(mtx));
		mutex_exit(mtx);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (mtx != NULL && (priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
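
/*
 * A minimal usage sketch for mtsleep() (illustrative; "sc" and its members
 * are hypothetical driver state, not part of this file):
 *
 *	int error = 0;
 *
 *	mutex_enter(&sc->sc_mutex);
 *	while (!sc->sc_done && error == 0)
 *		error = mtsleep(&sc->sc_done, PUSER | PCATCH, "scwait", 0,
 *		    &sc->sc_mutex);
 *	mutex_exit(&sc->sc_mutex);
 *
 * The wakeup side sets sc_done under the same mutex and then calls
 * wakeup(&sc->sc_done).
 */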

/*
 * sched_pause:
 *
 *	General sleep call for situations where a wake-up is not expected.
 */
int
sched_pause(const char *wmesg, boolean_t intr, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	return sleepq_unblock(timo, intr);
}

void
sa_awaken(struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, NULL));

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (eg sched_yield(2) in compat code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is boolean.  Returning to userspace
 * preempt() calls pass 0.  "Voluntary" preemptions in e.g. uiomove() pass 1.
 * This will be used to indicate to the SA subsystem that the LWP is
 * not yet finished in the kernel.
 */
void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	r = mi_switch(l, NULL);

	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	KERNEL_UNLOCK_ALL(l, &hold_count);

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock.
	 */
	splx(oldspl);
	KERNEL_LOCK(hold_count, l);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&l->l_sigpend.sp_set, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_proc->p_sa)
		sa_awaken(l);

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the swapper
	 * to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(lwp_locked(l, NULL));

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	lwp_changepri(l, l->l_usrpri);
}
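
/*
 * Worked example (assuming PUSER == 50 and NZERO == 20, the usual NetBSD
 * values): an LWP whose process has p_estcpu == (30 << ESTCPU_SHIFT) and
 * p_nice == 24 gets
 *
 *	newpriority = 50 + 30 + NICE_WEIGHT * (24 - 20) = 88
 *
 * clamped to MAXPRI if necessary; larger values mean weaker priority.
 */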

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_smutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_exit(&p->p_smutex);
	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}
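
/*
 * Illustration: each schedclock() call adds (1 << ESTCPU_SHIFT) to
 * p_estcpu, which raises the priority computed by resetpriority() by one.
 * With PPQ == 128 / RUNQUE_NQS == 4 (for 32 queues), roughly every fourth
 * increment moves the LWP one run queue further from the head.
 */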

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to
			 * the user / kernel boundary, so that they will
			 * release any locks that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_exit(&parent->p_smutex);
}

/*
 * sched_kpri:
 *
 *	Give an LWP a priority boost before it sleeps.  Currently we scale
 *	user priorities into the range 60 -> 40, and kernel priorities into
 *	40 -> 0.
 */
int
sched_kpri(struct lwp *l)
{
	static const uint8_t kpri_tab[] = {
		 0,   0,   1,   2,   3,   4,   4,   5,
		 6,   7,   8,   8,   9,  10,  11,  12,
		12,  13,  14,  15,  16,  16,  17,  18,
		19,  20,  20,  21,  22,  23,  24,  24,
		25,  26,  27,  28,  28,  29,  30,  31,
		32,  32,  33,  34,  35,  36,  36,  37,
		38,  39,  40,  40,  40,  40,  41,  41,
		41,  41,  42,  42,  42,  42,  43,  43,
		43,  43,  44,  44,  44,  44,  45,  45,
		45,  45,  46,  46,  46,  47,  47,  47,
		47,  48,  48,  48,  48,  49,  49,  49,
		49,  50,  50,  50,  50,  51,  51,  51,
		51,  52,  52,  52,  52,  53,  53,  53,
		54,  54,  54,  54,  55,  55,  55,  55,
		56,  56,  56,  56,  57,  57,  57,  57,
		58,  58,  58,  58,  59,  59,  59,  60,
	};

	return kpri_tab[l->l_priority];
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (l->l_usrpri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use a MSB ordering for the priorities
 * than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
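
/*
 * Example: with the usual RUNQUE_NQS == 32, PPQ == 4, so an LWP at
 * priority 64 lands on sched_qs[64 / 4] == sched_qs[16], and RQMASK(16)
 * is the sched_whichqs bit marking that queue non-empty.
 */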
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */