/*	$NetBSD: kern_synch.c,v 1.185 2007/02/27 15:07:29 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.185 2007/02/27 15:07:29 yamt Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, pri_t);
void	sched_lendpri(struct lwp *, pri_t);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ... for -1 < x < 1;
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	  solving for factor,
 *	  ln(factor) =~ (-2.30/5*loadav), or
 *	  factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	      exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	  solving for power,
 *	  power*ln(b/(b+1)) =~ -2.30, or
 *	  power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
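
/*
 * A minimal standalone sketch (not kernel code, never compiled here) that
 * checks the identity above numerically: for each loadav, it evaluates
 * decay = 2*loadav/(2*loadav + 1) raised to 5*loadav, and also the exact
 * power needed to reach .1, which should reproduce the table of power
 * values.  Everything below is local to the sketch.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	int loadav;

	for (loadav = 1; loadav <= 4; loadav++) {
		double b = 2.0 * loadav;
		double decay = b / (b + 1.0);

		/* decay ** (5 * loadav) should be close to .1 */
		printf("loadav %d: decay^%d = %.4f, exact power = %.2f\n",
		    loadav, 5 * loadav, pow(decay, 5 * loadav),
		    log(0.1) / log(decay));
	}
	return 0;
}
#endif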

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
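
/*
 * Another standalone sketch: a floating-point check of the cutoff used by
 * decay_cpu_batch() above.  With loadav == 1 the loadfactor is 2, so the
 * cutoff is 14 decay steps; starting from the worst case of
 * (255 << ESTCPU_SHIFT), the result should already be below
 * (1 << ESTCPU_SHIFT).
 */
#if 0
#include <stdio.h>

int
main(void)
{
	double decay = 2.0 / 3.0;	/* loadfac / (loadfac + 1), loadav 1 */
	double estcpu = 255.0 * 2048.0;	/* 255 << ESTCPU_SHIFT, shift == 11 */
	int n;

	for (n = 0; n < 14; n++)	/* 7 * loadfactor(1) steps */
		estcpu *= decay;

	/* prints roughly 1790, i.e. below 2048 == 1 << ESTCPU_SHIFT */
	printf("estcpu after 14 steps: %.1f\n", estcpu);
	return 0;
}
#endif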

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
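
/*
 * Standalone sketch: why exp(-1/20) gives "95% in 60 seconds".  The factor
 * is applied once per second by schedcpu(), so after 60 applications the
 * remaining fraction is exp(-60/20) == exp(-3), about 5%.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double ccpu = exp(-1.0 / 20.0);

	printf("ccpu    = %.20f\n", ccpu);		/* 0.95122942450... */
	printf("ccpu^60 = %.4f\n", pow(ccpu, 60.0));	/* ~0.0498, 5% left */
	return 0;
}
#endif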

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100) ?
			    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			    100 * (((fixpt_t)p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If priority includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is
 * set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
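
/*
 * A hedged sketch of the classic ltsleep() calling convention, for
 * reference only: the condition is tested under the simplelock, which
 * ltsleep() releases once the LWP is on the sleep queue and (absent
 * PNORELOCK) reacquires before returning.  The sc_lock/sc_ready names
 * are hypothetical driver state, not part of this file.
 */
#if 0
	int error = 0;

	simple_lock(&sc->sc_lock);
	while (sc->sc_ready == 0 && error == 0)
		error = ltsleep(&sc->sc_ready, PRIBIO | PCATCH, "scwait",
		    0, &sc->sc_lock);
	simple_unlock(&sc->sc_lock);
#endif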

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
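
/*
 * Hedged usage sketch for kpause(): sleep for roughly 100ms with no
 * wakeup channel expected.  mstohz() converts milliseconds to clock
 * ticks; when the timeout simply expires, the expected return value
 * is EWOULDBLOCK.
 */
#if 0
	int error;

	error = kpause("pause", false, mstohz(100), NULL);
#endif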

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process sleeping on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}
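
/*
 * Hedged sketch of the wakeup side of the ltsleep() pattern shown
 * earlier; sc_lock/sc_ready are the same hypothetical driver state.
 * The condition must change before the wakeup, or the sleeper can
 * re-test it and go straight back to sleep.
 */
#if 0
	simple_lock(&sc->sc_lock);
	sc->sc_ready = 1;
	simple_unlock(&sc->sc_lock);

	/*
	 * Wake every LWP sleeping on &sc->sc_ready; wakeup_one() would
	 * wake only the first (highest priority) sleeper.
	 */
	wakeup(&sc->sc_ready);
#endif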

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * The machine independent parts of context switch.  Switch to "newl"
 * if non-NULL, otherwise let cpu_switch() choose the next LWP.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
	int retval, oldspl;
	long s, u;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_spin_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
	splx(oldspl);

	return retval;
}
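
/*
 * Standalone sketch of the run-time accounting done in mi_switch() above:
 * add the (now - start) interval into an accumulator, keeping tv_usec
 * normalized to [0, 1000000).  The helper name is hypothetical; the
 * timeradd()/timersub() macros from <sys/time.h> express the same idea.
 */
#if 0
#include <sys/time.h>

static void
runtime_accumulate(struct timeval *acc, const struct timeval *start,
	const struct timeval *now)
{
	long u, s;

	u = acc->tv_usec + (now->tv_usec - start->tv_usec);
	s = acc->tv_sec + (now->tv_sec - start->tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	acc->tv_usec = u;
	acc->tv_sec = s;
}
#endif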

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached
		 * us while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run
	 * queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		setrunqueue(l);
		resched_lwp(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	pri_t newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
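
/*
 * A hedged, standalone restatement of the resetpriority() formula above,
 * using the conventional values PUSER == 50, NZERO == 20, MAXPRI == 127
 * (quoted here as assumptions, since they live in other headers).  A
 * larger value is a worse priority.
 */
#if 0
static int
user_priority(unsigned int estcpu, int nice)
{
	int newpriority;

	newpriority = 50 /* PUSER */ + (estcpu >> 11 /* ESTCPU_SHIFT */) +
	    2 /* NICE_WEIGHT */ * (nice - 20 /* NZERO */);
	if (newpriority > 127 /* MAXPRI */)
		newpriority = 127;
	return newpriority;
	/*
	 * e.g. after 30 schedclock() ticks at nice 0:
	 *	50 + 30 + 0 == 80
	 */
}
#endif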

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return (pri_t)kpri_tab[l->l_usrpri];
}
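
/*
 * Standalone sketch of how a table like kpri_tab can be generated:
 * kernel priorities (0..49) map to themselves, and user priorities
 * (50..127) are scaled linearly onto 8..49.  The rounding below is a
 * plausible reconstruction only and need not match the table above
 * entry for entry.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int i, kpri;

	for (i = 0; i < 128; i++) {
		kpri = (i < 50) ? i : 8 + (i - 50) * (49 - 8 + 1) / 78;
		printf("%3d%s", kpri, (i % 8 == 7) ? ",\n" : ", ");
	}
	return 0;
}
#endif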

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;
	if (l->l_priority < PUSER)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l);
}

void
sched_lendpri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	remrunqueue(l);
	l->l_inheritedprio = pri;
	setrunqueue(l);
	resched_lwp(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif
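
/*
 * Hedged sketch: with the LSB ordering, the best (lowest numbered)
 * non-empty run queue is found from the bitmap with ffs(3); the MSB
 * ordering lets architectures with a count-leading-zeros instruction
 * do the equivalent scan in one step.  Standalone illustration only.
 */
#if 0
#include <stdint.h>
#include <strings.h>

static int
best_queue(uint32_t whichqs)
{

	if (whichqs == 0)
		return -1;		/* all queues empty */
	return ffs((int)whichqs) - 1;	/* LSB ordering: bit n == queue n */
}
#endif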

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  setrunqueue() puts processes
 * into queues, remrunqueue() removes them from queues.  The running process
 * is on no queue; other processes are on a queue related to p->p_priority,
 * divided by PPQ (4) to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = lwp_eprio(l) / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = lwp_eprio(l) / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */