/*	$NetBSD: kern_synch.c,v 1.166.2.6 2006/11/18 21:39:22 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.6 2006/11/18 21:39:22 ad Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
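
/*
 * As a quick sanity check of the derivation above, a small userland
 * sketch (illustrative only, not part of the kernel build) can iterate
 * decay = b/(b+1) with b = 2*loadavg and count how many multiplications
 * it takes to fall below .1:
 *
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		double loadavg, f;
 *		int n;
 *
 *		for (loadavg = 1; loadavg <= 4; loadavg++) {
 *			f = 1.0;
 *			for (n = 0; f > 0.1; n++)
 *				f *= (2 * loadavg) / (2 * loadavg + 1);
 *			printf("loadav %g: power %d\n", loadavg, n);
 *		}
 *		return 0;
 *	}
 *
 * This prints 6, 11, 15 and 20: the first integer powers at or above
 * the 5.68, 10.32, 14.94 and 19.55 tabulated above.
 */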

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
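
/*
 * A worked instance of the claim above, assuming loadavg 1 (so
 * loadfac == 2 << FSHIFT, i.e. a per-second decay factor of 2/3):
 * seven times the loadfactor is 14 seconds of sleep, (2/3)^14 =~ 0.0034,
 * and even the historical maximum of (255 << ESTCPU_SHIFT) == 522240
 * decays to about 1790, below (1 << ESTCPU_SHIFT) == 2048.  That is
 * why decay_cpu_batch() can short-circuit straight to zero in that case.
 */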

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
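
/*
 * Worked check of the 60 second figure: applying ccpu == exp(-1/20)
 * once per second leaves exp(-60/20) == exp(-3) =~ 0.0498 of the
 * original p_pctcpu after 60 seconds, i.e. roughly 95% is forgotten,
 * as advertised.
 */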

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int s, minslp;
	int clkhz;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 *
		 * XXXSMP Should create an activeproc list so that we
		 * don't touch every proc+LWP in the system on a regular
		 * basis.  l->l_swtime/l->l_slptime can become deltas.
		 */
		minslp = 2;
		runtm = 0;
		mutex_enter(&p->p_smutex);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.  In any case, if it has run for more
		 * than autonicetime, reduce priority to give others a chance.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				psignal(p, SIGKILL);
			else {
				psignal(p, SIGXCPU);
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1) {
			mutex_exit(&p->p_smutex);
			continue;
		}
		s = splstatclock();	/* XXXSMP prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		splx(s);	/* Done with the process CPU ticks update */
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			if (l->l_slptime > 1) {
				lwp_unlock(l);
				continue;
			}
			resetpriority(l);
			if (l->l_priority >= PUSER) {
				if (l->l_stat == LSRUN &&
				    (l->l_flag & L_INMEM) &&
				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
					lwp_changepri(l, l->l_usrpri);
				} else
					l->l_priority = l->l_usrpri;
			}
			lwp_unlock(l);
		}
		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked. */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * ltsleep: see mtsleep() for comments.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);

	sleepq_enter(sq, priority & PRIMASK, ident, wmesg, timo,
	    priority & PCATCH, &sleep_syncobj);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(sq, timo);
	sleepq_unblock();

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
mtsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(mtx, priority & PNORELOCK);

	sq = sleeptab_lookup(&sleeptab, ident);

	sleepq_enter(sq, priority & PRIMASK, ident, wmesg, timo,
	    priority & PCATCH, &sleep_syncobj);

	if (mtx != NULL) {
		LOCK_ASSERT(mutex_owned(mtx));
		mutex_exit(mtx);
	}

	error = sleepq_block(sq, timo);
	sleepq_unblock();

	if (mtx != NULL && (priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
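
/*
 * A typical caller pairs mtsleep() with wakeup() around a condition,
 * re-testing the condition on every wakeup.  A minimal sketch, with
 * hypothetical names (sc, sc_lock, sc_ready):
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = mtsleep(&sc->sc_ready, PRIBIO, "scrdy", 0,
 *		    &sc->sc_lock);
 *		if (error)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *
 * and on the producing side:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = 1;
 *	wakeup(&sc->sc_ready);
 *	mutex_exit(&sc->sc_lock);
 */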

void
sa_awaken(struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, NULL));

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is boolean: preempt() calls made
 * while returning to user space pass 0, while "voluntary" preemptions in
 * e.g. uiomove() pass 1.  It is used to tell the SA subsystem that the LWP
 * is not yet finished in the kernel.
 */
void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	r = mi_switch(l, NULL);
	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch() choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
	int hold_count;
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	hold_count = KERNEL_UNLOCK(0, l);

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock.
	 */
	splx(oldspl);
	KERNEL_LOCK(hold_count, l);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&l->l_sigpend->sp_set, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		/*
		 * If the LWP was sleeping interruptibly, then it's OK to
		 * start it again.  If not, mark it as still sleeping.
		 */
		KASSERT(l->l_wchan != NULL);

		if ((l->l_flag & L_SINTR) != 0) {
			/* lwp_unsleep() will release the lock. */
			lwp_unsleep(l);
		} else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	if (l->l_proc->p_sa)
		sa_awaken(l);

	/*
	 * Swap in sched_mutex as the LWP's current mutex.  If the LWP is
	 * still on the CPU, mark it as LSONPROC.  It may be about to call
	 * mi_switch(), in which case it will yield.
	 */
	lwp_relock(l, &sched_mutex);

	if ((ci = l->l_cpu) != NULL && ci->ci_curlwp == l) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	l->l_stat = LSRUN;
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	LOCK_ASSERT(lwp_locked(l, NULL));

	/* XXXSMP proc values will be accessed unlocked */
	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	resched_lwp(l, l->l_usrpri);
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
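
/*
 * For a rough feel of the numbers (using the traditional PUSER of 50
 * and PPQ of 4): a nice 0 process that has accumulated
 * p_estcpu == 20 << ESTCPU_SHIFT computes newpriority == 50 + 20 == 70
 * in resetpriority(), five run queues below a freshly decayed
 * competitor sitting at PUSER.
 */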

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));

	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));

	lwp_lock(l);
	resetpriority(l);
	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	rw_enter(&proclist_lock, RW_READER);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	rw_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
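/*
 * That is (see the code below), the share the child inherited at fork
 * time is first decayed as it would have been had it stayed with the
 * parent, and only the excess the child earned on its own is folded
 * back in.  A worked instance, with made-up figures: if the child
 * inherited 10 << ESTCPU_SHIFT, that would have decayed to
 * 4 << ESTCPU_SHIFT by exit time, and the child dies with
 * 9 << ESTCPU_SHIFT, then the parent is charged 5 << ESTCPU_SHIFT.
 */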
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_smutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_exit(&parent->p_smutex);
}

/*
 * sched_kpri:
 *
 *	Give an LWP a priority boost before it sleeps.  Currently we scale
 *	user priorities into the range 60 -> 40, and kernel priorities into
 *	40 -> 0.
 */
int
sched_kpri(struct lwp *l)
{
	static const uint8_t kpri_tab[] = {
		 0,   0,   1,   2,   3,   4,   4,   5,
		 6,   7,   8,   8,   9,  10,  11,  12,
		12,  13,  14,  15,  16,  16,  17,  18,
		19,  20,  20,  21,  22,  23,  24,  24,
		25,  26,  27,  28,  28,  29,  30,  31,
		32,  32,  33,  34,  35,  36,  36,  37,
		38,  39,  40,  40,  40,  40,  41,  41,
		41,  41,  42,  42,  42,  42,  43,  43,
		43,  43,  44,  44,  44,  44,  45,  45,
		45,  45,  46,  46,  46,  47,  47,  47,
		47,  48,  48,  48,  48,  49,  49,  49,
		49,  50,  50,  50,  50,  51,  51,  51,
		51,  52,  52,  52,  52,  53,  53,  53,
		54,  54,  54,  54,  55,  55,  55,  55,
		56,  56,  56,  56,  57,  57,  57,  57,
		58,  58,  58,  58,  59,  59,  59,  60,
	};

	return kpri_tab[l->l_priority];
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);

	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use a MSB ordering for the priorities
 * than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
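
/*
 * For example, a priority of PUSER (50) maps to queue 50 / PPQ == 12.
 * With the LSB ordering, lower-numbered (better) priorities land in
 * lower bits, so the dispatcher can find the best non-empty queue with
 * a find-first-set on sched_whichqs.  A hedged sketch of the selection
 * step that the MD cpu_switch() implements (the real code is usually
 * assembler, and idles with interrupts enabled while no bit is set):
 *
 *	int whichq;
 *	struct lwp *l;
 *
 *	whichq = ffs(sched_whichqs) - 1;
 *	l = sched_qs[whichq].ph_link;
 *	remrunqueue(l);
 */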
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */