/*	$NetBSD: kern_synch.c,v 1.175 2007/02/10 02:55:18 christos Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.175 2007/02/10 02:55:18 christos Exp $");

#include "opt_ddb.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;		/* global sched state mutex */
struct prochd	sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */

void	schedcpu(void *);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

void	sched_unsleep(struct lwp *);
void	sched_changepri(struct lwp *, int);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri
};

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu());
}

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
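
/*
 * A worked example, assuming the usual NetBSD values of RUNQUE_NQS == 32
 * and PRIO_MAX == 20: PPQ == 128 / 32 == 4 priorities per run queue, and
 * ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11, so the clamped estimator
 * contributes at most 36 priority levels in resetpriority() below.
 */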

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
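
/*
 * A quick numerical check of the above: with loadav == 1 we have b == 2
 * and decay == 2/3, and indeed (2/3) ** 5.68 =~ 0.1, matching the first
 * column of the table.
 */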

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
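
/*
 * A worked fixed-point example, assuming the usual FSHIFT of 11 (so
 * FSCALE == 2048): ccpu == 0.95122 * 2048 =~ 1948, hence
 * FSCALE - ccpu =~ 100 =~ 0.0488 * 2048, which is why 11 bits of
 * fraction suffice for the faster formula used below.
 */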

/*
 * schedcpu:
 *
 *	Recompute process priorities, every hz ticks.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, clkhz, sig;
	long runtm;

	schedcpu_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}

		/*
		 * If the process has run for more than autonicetime, reduce
		 * priority to give others a chance.
		 */
		if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
		    && kauth_cred_geteuid(p->p_cred)) {
			mutex_spin_enter(&p->p_stmutex);
			p->p_nice = autoniceval + NZERO;
			resetprocpriority(p);
			mutex_spin_exit(&p->p_stmutex);
		}

		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			/*
			 * p_pctcpu is only for ps.
			 */
			mutex_spin_enter(&p->p_stmutex);
			clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
			p->p_pctcpu += (clkhz == 100)?
			    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
			    100 * (((fixpt_t) p->p_cpticks)
			    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
			p->p_pctcpu += ((FSCALE - ccpu) *
			    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
			p->p_cpticks = 0;
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

			LIST_FOREACH(l, &p->p_lwps, l_sibling) {
				lwp_lock(l);
				if (l->l_slptime <= 1 &&
				    l->l_priority >= PUSER)
					resetpriority(l);
				lwp_unlock(l);
			}
			mutex_spin_exit(&p->p_stmutex);
		}

		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	LOCK_ASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue. The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
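
/*
 * A sketch of typical usage, with a hypothetical softc "sc" and its
 * simplelock; the interlock guarantees that a wakeup() on &sc->sc_busy
 * cannot be lost between testing sc_busy and going to sleep:
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "scbusy",
 *		    0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 */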

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
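
/*
 * For example, kpause("pause", FALSE, hz / 10, NULL) blocks the calling
 * LWP for roughly 100ms without checking for signals; passing TRUE for
 * intr makes the sleep interruptible, as with PCATCH above.
 */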

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * The machine independent parts of context switch.  Switch to "new"
 * if non-NULL, otherwise let cpu_switch choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct timeval tv;
	int retval, oldspl;
	long s, u;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per-CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = l->l_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * Acquire the sched_mutex if necessary.  It will be released by
	 * cpu_switch once it has decided to idle, or picked another LWP
	 * to run.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_spin_enter(&sched_mutex);
		lwp_unlock(l);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		setrunqueue(l);
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(&sched_mutex, 1);

	/*
	 * Switch to the new current LWP.  When we run again, we'll
	 * return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	if (newl == NULL || newl->l_back == NULL)
		retval = cpu_switch(l, NULL);
	else {
		KASSERT(lwp_locked(newl, &sched_mutex));
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
	splx(oldspl);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];

	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
}

static inline void
resched_lwp(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci);
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	LOCK_ASSERT(mutex_owned(&p->p_smutex));
	LOCK_ASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~L_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		if ((l->l_flag & L_SINTR) != 0)
			lwp_unsleep(l);
		else {
			lwp_unlock(l);
#ifdef DIAGNOSTIC
			panic("setrunnable: !L_SINTR");
#endif
		}
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_slptime > 1)
		updatepri(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		resched_lwp(l, l->l_priority);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		wakeup(&proc0);
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
	LOCK_ASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & L_SYSTEM) != 0)
		return;

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	lwp_changepri(l, newpriority);
}
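
/*
 * For example, assuming the usual PUSER of 50 and NZERO of 20: an LWP
 * whose process has p_estcpu == (16 << ESTCPU_SHIFT) and p_nice ==
 * NZERO + 5 gets newpriority == 50 + 16 + 2 * 5 == 76, well below the
 * MAXPRI clamp.
 */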

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LOCK_ASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & P_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks that
			 * they hold.
			 */
			l->l_flag |= (L_WREBOOT | L_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & L_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci);
#else
	cpu_need_resched(curcpu());
#endif
	sched_unlock(0);
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	LOCK_ASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
int
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return kpri_tab[l->l_usrpri];
}
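
/*
 * For example, a user priority of 60 maps through kpri_tab[] to kernel
 * priority 13, while values already in the kernel range (0 -> 49) are
 * returned unchanged.
 */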

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

/*
 * sched_changepri:
 *
 *	Adjust the priority of an LWP.
 */
void
sched_changepri(struct lwp *l, int pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;

	if (l->l_priority < PUSER)
		return;
	if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
	    (l->l_priority / PPQ) == (pri / PPQ)) {
		l->l_priority = pri;
		return;
	}

	remrunqueue(l);
	l->l_priority = pri;
	setrunqueue(l);
	resched_lwp(l, pri);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
 * drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif

#if defined(DIAGNOSTIC)
	if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
		/* Shouldn't happen - interrupts disabled. */
		panic("remrunqueue: bit %d not set", whichq);
	}
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */