/*	$NetBSD: kern_synch.c,v 1.166.2.16 2007/01/28 07:20:39 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.16 2007/01/28 07:20:39 ad Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
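
/*
 * Worked example (illustrative only; assumes the usual RUNQUE_NQS of 32
 * and PRIO_MAX of 20 from <sys/resource.h>): PPQ = 128 / 32 = 4, so
 * ESTCPU_MAX = (2 * 20 - 4) << 11 = 36 << 11 = 73728.  The clamp thus
 * ensures that (p_estcpu >> ESTCPU_SHIFT) never contributes more than
 * 36 priority points in resetpriority() below.
 */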

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
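
/*
 * Numerical sanity check (an illustration, not part of the proof): with
 * loadavg == 2, b = 4 and decay = 4/5.  Then 0.8 ** 10.32 =~ 0.1; that
 * is, after roughly ten seconds of decay only 10% of the accumulated
 * p_estcpu remains, matching the "power" row in the table above.
 */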

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
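
/*
 * Fixed-point illustration (assumes the common FSHIFT of 11, so
 * FSCALE == 2048): a load average of 1.0 is stored as 2048, giving
 * loadfac = 4096 and a per-second multiplier of 4096 / (4096 + 2048),
 * i.e. 2/3 in fixed point -- exactly the b/(b+1) decay derived above.
 * The !_LP64 fast path exists because estcpu * loadfac can overflow
 * 32 bits when loadfac is large.
 */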

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
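
/*
 * For example (illustrative): with a steady load average of 1.0,
 * loadfac is 2.0 in fixed point, so an LWP that has slept for
 * n >= 14 seconds has its p_estcpu zeroed outright rather than decayed
 * one step at a time -- (2/3) ** 14 =~ 0.003, comfortably below 1/255,
 * so the shortcut and the loop agree.
 */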

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
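
/*
 * In other words (worked through for illustration): schedcpu() below
 * multiplies p_pctcpu by ccpu once per second, which implements
 * p_pctcpu *= exp(-1/20).  Over 60 seconds that compounds to
 * exp(-60/20) = exp(-3) =~ 0.0498, i.e. roughly 95% of the old usage
 * has been forgotten, as the comment above ccpu promises.
 */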

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100) ?
			    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			    100 * (((fixpt_t)p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
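
/*
 * A sketch of a typical (hypothetical) caller, for illustration only;
 * the names "sc", "sc_lock", "sc_busy" and "xxwait" are made up:
 *
 *	simple_lock(&sc->sc_lock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "xxwait",
 *		    0, &sc->sc_lock);
 *		if (error)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_lock);
 *
 * The interlock guarantees there is no window between testing sc_busy
 * and going to sleep in which a wakeup(&sc->sc_busy) could be lost, and
 * because PNORELOCK is not given, the interlock is held again by the
 * time ltsleep() returns.
 */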

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
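
/*
 * Because the LWP sleeps on its own LWP pointer, no ordinary wakeup()
 * will find it; the sleep ends via the timeout (or a signal, if "intr"
 * is true).  A hypothetical use, pausing for about a tenth of a second
 * with no mutex to drop (assumes the mstohz() millisecond-to-ticks
 * conversion from <sys/param.h>):
 *
 *	(void)kpause("xxpause", FALSE, mstohz(100), NULL);
 *
 * timo should be nonzero, or the caller would sleep forever.
 */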

void
sa_awaken(struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, NULL));

	if (l == l->l_savp->savp_lwp && (l->l_flag & L_SA_YIELD))
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (eg sched_yield(2) in compat code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 *
 * The 'more' ("more work to do") argument is boolean.  preempt() calls made
 * on return to userspace pass 0; "voluntary" preemptions in e.g. uiomove()
 * pass 1.  It is used to tell the SA subsystem that the LWP is not yet
 * finished in the kernel.
 */
void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r;

	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	r = mi_switch(l, NULL);

	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif
	int retval, oldspl;
	long s, u;
#if PERFCTRS
	struct proc *p = l->l_proc;
#endif

	LOCK_ASSERT(lwp_locked(l, NULL));

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 */
	KERNEL_UNLOCK_ALL(l, &hold_count);

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per-CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_save_context(p);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_spin_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p)) {
		pmc_restore_context(p);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock.
	 */
	splx(oldspl);
	KERNEL_LOCK(hold_count, l);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&l->l_sigpend.sp_set, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_proc->p_sa)
		sa_awaken(l);

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	if (l->l_priority != newpriority)
		lwp_changepri(l, newpriority);
}
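
/*
 * Worked example (illustrative; assumes the traditional PUSER of 50,
 * MAXPRI of 127 and NZERO of 20): a process with p_estcpu == 16 << 11
 * and nice 0 gets priority 50 + 16 + 0 = 66; renicing it to +10 adds
 * NICE_WEIGHT * 10 = 20 points, giving 86.  Larger numbers are worse
 * priorities, and the min() clamp keeps the result inside the 0-127
 * priority range.
 */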

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_exit(&p->p_stmutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_exit(&parent->p_stmutex);
}
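
/*
 * To illustrate the chargeback arithmetic: the child started life with
 * a copy of the parent's p_estcpu.  At exit, decay_cpu_batch() computes
 * what would be left of that inherited value had it simply decayed for
 * the child's whole lifetime (schedcpu_ticks - p_forktime seconds).
 * Only the excess of the child's final p_estcpu over that baseline --
 * CPU the child actually earned on its own -- is billed to the parent.
 */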

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
	static const uint8_t kpri_tab[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49,  8,  8,  9,  9, 10, 10,
		11, 11, 12, 12, 13, 14, 14, 15,
		15, 16, 16, 17, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 22, 23, 23,
		24, 24, 25, 26, 26, 27, 27, 28,
		28, 29, 29, 30, 30, 31, 32, 32,
		33, 33, 34, 34, 35, 35, 36, 36,
		37, 38, 38, 39, 39, 40, 40, 41,
		41, 42, 42, 43, 44, 44, 45, 45,
		46, 46, 47, 47, 48, 48, 49, 49,
	};

	return kpri_tab[l->l_priority];
}
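
/*
 * Reading the table: priorities 0-49 (already kernel priorities) map to
 * themselves, while user priorities 50-127 are squeezed linearly into
 * the kernel range 8-49.  For example, an LWP at user priority 100
 * sleeps at kernel priority kpri_tab[100] == 35, and the worst user
 * priority, 127, becomes 49.
 */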

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use a MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
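
/*
 * For example (assuming the LSB ordering and PPQ == 4): an LWP at
 * priority 62 lands on queue 62 / 4 == 15, and sched_whichqs has bit
 * RQMASK(15) == 0x00008000 set while that queue is non-empty.  Finding
 * the best runnable LWP is then just a find-first-set-bit over
 * sched_whichqs followed by taking the head of that queue.
 */
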
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */