/*	$NetBSD: kern_synch.c,v 1.180 2007/02/18 16:58:16 dsl Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.180 2007/02/18 16:58:16 dsl Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
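
/*
 * Worked example (illustrative only, assuming the usual RUNQUE_NQS of 32
 * and PRIO_MAX of 20 from the standard headers): PPQ == 128 / 32 == 4,
 * so ESTCPU_MAX == (2 * 20 - 4) << 11 == 73728, and a saturated p_estcpu
 * can contribute at most 36 levels (ESTCPU_MAX >> ESTCPU_SHIFT) to the
 * priority computed in resetpriority() below.
 */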

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
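
/*
 * Illustrative numeric check of the above, not used by the code: with
 * loadav == 1, b == 2 and decay == 2/3; the table gives power == 5.68,
 * and indeed (2/3)**5.68 =~ .1.  Likewise (4/5)**10.32 =~ .1 for
 * loadav == 2.
 */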

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
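
/*
 * Illustrative example, assuming the conventional FSHIFT of 11: with
 * loadav == 1, loadfac == 2*FSCALE, so the short-cut above fires once
 * n >= 14; i.e. an LWP that has slept for 14 seconds or more has its
 * estcpu zeroed outright rather than decayed one step per second slept.
 */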

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
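
/*
 * Worked example of the fixed-point constants (illustrative only,
 * assuming FSHIFT == 11, i.e. FSCALE == 2048): ccpu =~ 0.95123 * 2048
 * =~ 1948, so each schedcpu() pass below multiplies p_pctcpu by
 * 1948/2048.  After 60 passes the remaining fraction is exp(-60/20) ==
 * exp(-3) =~ 0.0498, which is where the "decay 95% in 60 seconds"
 * figure comes from.
 */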

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100) ?
			    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			    100 * (((fixpt_t)p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR if the system call
 * should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
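
/*
 * Typical legacy usage, sketched for illustration only ("sc", "sc_flags"
 * and SC_READY are hypothetical driver state):
 *
 *	simple_lock(&sc->sc_slock);
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = ltsleep(&sc->sc_flags, PWAIT | PCATCH, "scrdy",
 *		    0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 *
 * The interlock is dropped while blocked and reacquired before return,
 * so the wait condition must be re-tested on every iteration.
 */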

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
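
/*
 * Usage sketch (illustrative): delay for roughly 100ms, uninterruptible
 * by signals, with no mutex to drop:
 *
 *	(void)kpause("pause", FALSE, mstohz(100), NULL);
 *
 * If a held kmutex is passed as the last argument it is released across
 * the sleep and reacquired before kpause() returns.
 */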

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
	int retval, oldspl;
	long s, u;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per-CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0 || u >= 1000000) {
		if (u < 0) {
			u += 1000000;
			s--;
		} else {
			u -= 1000000;
			s++;
		}
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_spin_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
	splx(oldspl);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & LW_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}
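
/*
 * Worked example of the formula above (illustrative; assumes the
 * conventional PUSER of 50): at the default nice (p_nice == NZERO) an
 * accumulated p_estcpu of 16 << ESTCPU_SHIFT yields
 *
 *	newpriority = 50 + 16 + NICE_WEIGHT * 0 = 66
 *
 * while "nice 10" would add NICE_WEIGHT * 10 == 20 more, subject to the
 * min() clamp at MAXPRI.
 */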

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want
			 * to get as many LWPs as possible to the
			 * user/kernel boundary, so that they will release
			 * any locks that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return kpri_tab[l->l_usrpri];
}
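
/*
 * Reading the table above: an LWP sleeping from the worst user priority,
 * l_usrpri == 127, blocks at kernel priority 49; one at l_usrpri == 50
 * blocks at 8; and values 0-49 (already kernel priorities) map to
 * themselves.
 */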

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;

	if (l->l_priority < PUSER)
		return;
	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the priorities
 * than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue; other processes are on a queue related to p->p_priority,
 * divided by PPQ (4, with 32 queues) to shrink the 0-127 range of
 * priorities into the 32 available queues.
 */
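
/*
 * For example, with PPQ == 4 an LWP at priority 100 lands on queue 25,
 * and RQMASK(25) is the bit tested in sched_whichqs.  Priorities 100-103
 * all share that queue, which is why sched_changepri() above can skip
 * the requeue when a priority change stays within one PPQ-sized band.
 */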
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (const void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}
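
/*
 * Note that the queue head is a struct prochd whose ph_link/ph_rlink
 * fields overlay l_forw/l_back, so the cast above lets the head act as
 * a sentinel node: insertion is a plain tail-insert on a circular
 * doubly-linked list, and an empty queue points at itself (see rqinit()).
 */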

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
 * drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */