     1 1.166.2.20       ad /*	$NetBSD: kern_synch.c,v 1.166.2.20 2007/02/09 19:58:10 ad Exp $	*/
2 1.63 thorpej
3 1.63 thorpej /*-
4 1.166.2.11 ad * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
5 1.63 thorpej * All rights reserved.
6 1.63 thorpej *
7 1.63 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.63 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.166.2.2 ad * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 1.63 thorpej *
11 1.63 thorpej * Redistribution and use in source and binary forms, with or without
12 1.63 thorpej * modification, are permitted provided that the following conditions
13 1.63 thorpej * are met:
14 1.63 thorpej * 1. Redistributions of source code must retain the above copyright
15 1.63 thorpej * notice, this list of conditions and the following disclaimer.
16 1.63 thorpej * 2. Redistributions in binary form must reproduce the above copyright
17 1.63 thorpej * notice, this list of conditions and the following disclaimer in the
18 1.63 thorpej * documentation and/or other materials provided with the distribution.
19 1.63 thorpej * 3. All advertising materials mentioning features or use of this software
20 1.63 thorpej * must display the following acknowledgement:
21 1.63 thorpej * This product includes software developed by the NetBSD
22 1.63 thorpej * Foundation, Inc. and its contributors.
23 1.63 thorpej * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.63 thorpej * contributors may be used to endorse or promote products derived
25 1.63 thorpej * from this software without specific prior written permission.
26 1.63 thorpej *
27 1.63 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.63 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.63 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.63 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.63 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.63 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.63 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.63 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.63 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.63 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.63 thorpej * POSSIBILITY OF SUCH DAMAGE.
38 1.63 thorpej */
39 1.26 cgd
40 1.26 cgd /*-
41 1.26 cgd * Copyright (c) 1982, 1986, 1990, 1991, 1993
42 1.26 cgd * The Regents of the University of California. All rights reserved.
43 1.26 cgd * (c) UNIX System Laboratories, Inc.
44 1.26 cgd * All or some portions of this file are derived from material licensed
45 1.26 cgd * to the University of California by American Telephone and Telegraph
46 1.26 cgd * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 1.26 cgd * the permission of UNIX System Laboratories, Inc.
48 1.26 cgd *
49 1.26 cgd * Redistribution and use in source and binary forms, with or without
50 1.26 cgd * modification, are permitted provided that the following conditions
51 1.26 cgd * are met:
52 1.26 cgd * 1. Redistributions of source code must retain the above copyright
53 1.26 cgd * notice, this list of conditions and the following disclaimer.
54 1.26 cgd * 2. Redistributions in binary form must reproduce the above copyright
55 1.26 cgd * notice, this list of conditions and the following disclaimer in the
56 1.26 cgd * documentation and/or other materials provided with the distribution.
57 1.136 agc * 3. Neither the name of the University nor the names of its contributors
58 1.26 cgd * may be used to endorse or promote products derived from this software
59 1.26 cgd * without specific prior written permission.
60 1.26 cgd *
61 1.26 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62 1.26 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63 1.26 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64 1.26 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65 1.26 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66 1.26 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67 1.26 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68 1.26 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69 1.26 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70 1.26 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 1.26 cgd * SUCH DAMAGE.
72 1.26 cgd *
73 1.50 fvdl * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
74 1.26 cgd */
75 1.106 lukem
76 1.106 lukem #include <sys/cdefs.h>
77 1.166.2.20 ad __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.166.2.20 2007/02/09 19:58:10 ad Exp $");
78 1.48 mrg
79 1.52 jonathan #include "opt_ddb.h"
80 1.109 yamt #include "opt_kstack.h"
81 1.82 thorpej #include "opt_lockdebug.h"
82 1.83 thorpej #include "opt_multiprocessor.h"
83 1.110 briggs #include "opt_perfctrs.h"
84 1.26 cgd
85 1.166.2.2 ad #define __MUTEX_PRIVATE
86 1.166.2.2 ad
87 1.26 cgd #include <sys/param.h>
88 1.26 cgd #include <sys/systm.h>
89 1.68 thorpej #include <sys/callout.h>
90 1.26 cgd #include <sys/proc.h>
91 1.26 cgd #include <sys/kernel.h>
92 1.26 cgd #include <sys/buf.h>
93 1.111 briggs #if defined(PERFCTRS)
94 1.110 briggs #include <sys/pmc.h>
95 1.111 briggs #endif
96 1.26 cgd #include <sys/signalvar.h>
97 1.26 cgd #include <sys/resourcevar.h>
98 1.55 ross #include <sys/sched.h>
99 1.161 elad #include <sys/kauth.h>
100 1.166.2.2 ad #include <sys/sleepq.h>
101 1.166.2.2 ad #include <sys/lockdebug.h>
102 1.47 mrg
103 1.47 mrg #include <uvm/uvm_extern.h>
104 1.47 mrg
105 1.26 cgd #include <machine/cpu.h>
106 1.34 christos
107 1.26 cgd int lbolt; /* once a second sleep address */
108 1.88 sommerfe int rrticks; /* number of hardclock ticks per roundrobin() */
109 1.26 cgd
110 1.73 thorpej /*
111 1.73 thorpej * The global scheduler state.
112 1.73 thorpej */
113 1.166.2.4 ad kmutex_t sched_mutex; /* global sched state mutex */
114 1.166.2.2 ad struct prochd sched_qs[RUNQUE_NQS]; /* run queues */
115 1.159 perry volatile uint32_t sched_whichqs; /* bitmap of non-empty queues */
116 1.34 christos
117 1.166.2.2 ad void schedcpu(void *);
118 1.166.2.2 ad void updatepri(struct lwp *);
119 1.166.2.2 ad void sa_awaken(struct lwp *);
120 1.63 thorpej
121 1.166.2.4 ad void sched_unsleep(struct lwp *);
122 1.166.2.4 ad void sched_changepri(struct lwp *, int);
123 1.166.2.4 ad
124 1.143 yamt struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
125 1.157 yamt static unsigned int schedcpu_ticks;
126 1.122 thorpej
127 1.166.2.4 ad syncobj_t sleep_syncobj = {
128 1.166.2.4 ad SOBJ_SLEEPQ_SORTED,
129 1.166.2.4 ad sleepq_unsleep,
130 1.166.2.4 ad sleepq_changepri
131 1.166.2.4 ad };
132 1.166.2.4 ad
133 1.166.2.4 ad syncobj_t sched_syncobj = {
134 1.166.2.4 ad SOBJ_SLEEPQ_SORTED,
135 1.166.2.4 ad sched_unsleep,
136 1.166.2.4 ad sched_changepri
137 1.166.2.4 ad };
138 1.166.2.4 ad
139 1.26 cgd /*
140 1.26 cgd * Force switch among equal priority processes every 100ms.
141 1.88 sommerfe * Called from hardclock every hz/10 == rrticks hardclock ticks.
142 1.26 cgd */
143 1.26 cgd /* ARGSUSED */
144 1.26 cgd void
145 1.89 sommerfe roundrobin(struct cpu_info *ci)
146 1.26 cgd {
147 1.89 sommerfe struct schedstate_percpu *spc = &ci->ci_schedstate;
148 1.26 cgd
149 1.88 sommerfe spc->spc_rrticks = rrticks;
150 1.130 nathanw
151 1.122 thorpej if (curlwp != NULL) {
152 1.73 thorpej if (spc->spc_flags & SPCF_SEENRR) {
153 1.69 thorpej /*
154 1.69 thorpej * The process has already been through a roundrobin
155 1.69 thorpej * without switching and may be hogging the CPU.
156 1.69 thorpej * Indicate that the process should yield.
157 1.69 thorpej */
158 1.73 thorpej spc->spc_flags |= SPCF_SHOULDYIELD;
159 1.69 thorpej } else
160 1.73 thorpej spc->spc_flags |= SPCF_SEENRR;
161 1.69 thorpej }
162 1.166.2.2 ad cpu_need_resched(curcpu());
163 1.26 cgd }
164 1.26 cgd
165 1.153 yamt #define PPQ (128 / RUNQUE_NQS) /* priorities per queue */
166 1.153 yamt #define NICE_WEIGHT 2 /* priorities per nice level */
167 1.153 yamt
168 1.153 yamt #define ESTCPU_SHIFT 11
169 1.153 yamt #define ESTCPU_MAX ((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
170 1.153 yamt #define ESTCPULIM(e) min((e), ESTCPU_MAX)
171 1.153 yamt
172 1.26 cgd /*
173 1.26 cgd * Constants for digital decay and forget:
174 1.26 cgd * 90% of (p_estcpu) usage in 5 * loadav time
175 1.26 cgd * 95% of (p_pctcpu) usage in 60 seconds (load insensitive)
176 1.26 cgd * Note that, as ps(1) mentions, this can let percentages
177 1.26 cgd * total over 100% (I've seen 137.9% for 3 processes).
178 1.26 cgd *
179 1.26 cgd * Note that hardclock updates p_estcpu and p_cpticks independently.
180 1.26 cgd *
181 1.26 cgd * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
182 1.26 cgd * That is, the system wants to compute a value of decay such
183 1.26 cgd * that the following for loop:
184 1.26 cgd * for (i = 0; i < (5 * loadavg); i++)
185 1.26 cgd * p_estcpu *= decay;
186 1.26 cgd * will compute
187 1.26 cgd * p_estcpu *= 0.1;
188 1.26 cgd * for all values of loadavg:
189 1.26 cgd *
190 1.26 cgd * Mathematically this loop can be expressed by saying:
191 1.26 cgd * decay ** (5 * loadavg) ~= .1
192 1.26 cgd *
193 1.26 cgd * The system computes decay as:
194 1.26 cgd * decay = (2 * loadavg) / (2 * loadavg + 1)
195 1.26 cgd *
196 1.26 cgd * We wish to prove that the system's computation of decay
197 1.26 cgd * will always fulfill the equation:
198 1.26 cgd * decay ** (5 * loadavg) ~= .1
199 1.26 cgd *
200 1.26 cgd * If we compute b as:
201 1.26 cgd * b = 2 * loadavg
202 1.26 cgd * then
203 1.26 cgd * decay = b / (b + 1)
204 1.26 cgd *
205 1.26 cgd * We now need to prove two things:
206 1.26 cgd * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
207 1.26 cgd * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
208 1.130 nathanw *
209 1.26 cgd * Facts:
210 1.26 cgd * For x close to zero, exp(x) =~ 1 + x, since
211 1.26 cgd * exp(x) = 0! + x**1/1! + x**2/2! + ... .
212 1.26 cgd * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
213 1.26 cgd * For x close to zero, ln(1+x) =~ x, since
214 1.26 cgd * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1
215 1.26 cgd * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
216 1.26 cgd * ln(.1) =~ -2.30
217 1.26 cgd *
218 1.26 cgd * Proof of (1):
219 1.26 cgd * Solve (factor)**(power) =~ .1 given power (5*loadav):
220 1.26 cgd * solving for factor,
221 1.26 cgd * ln(factor) =~ (-2.30/5*loadav), or
222 1.26 cgd * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
223 1.26 cgd * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED
224 1.26 cgd *
225 1.26 cgd * Proof of (2):
226 1.26 cgd * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
227 1.26 cgd * solving for power,
228 1.26 cgd * power*ln(b/(b+1)) =~ -2.30, or
229 1.26 cgd * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED
230 1.26 cgd *
231 1.26 cgd * Actual power values for the implemented algorithm are as follows:
232 1.26 cgd * loadav: 1 2 3 4
233 1.26 cgd * power: 5.68 10.32 14.94 19.55
234 1.26 cgd */
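/*
 * Worked example (illustrative only): with loadavg == 2 the computed
 * decay is b / (b + 1) == 4/5, and after 5 * loadavg == 10 one-second
 * decay steps p_estcpu has been scaled by (4/5)^10 =~ 0.107, i.e. the
 * ~90% forgetting described above.
 */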
235 1.26 cgd
236 1.26 cgd /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
237 1.26 cgd #define loadfactor(loadav) (2 * (loadav))
238 1.153 yamt
239 1.153 yamt static fixpt_t
240 1.153 yamt decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
241 1.153 yamt {
242 1.153 yamt
243 1.153 yamt if (estcpu == 0) {
244 1.153 yamt return 0;
245 1.153 yamt }
246 1.153 yamt
247 1.153 yamt #if !defined(_LP64)
   248 1.153     yamt 	/* Avoid 64-bit arithmetic; estcpu <= ESTCPU_MAX keeps the product below in range. */
249 1.153 yamt #define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
250 1.153 yamt if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
251 1.153 yamt return estcpu * loadfac / (loadfac + FSCALE);
252 1.153 yamt }
253 1.153 yamt #endif /* !defined(_LP64) */
254 1.153 yamt
255 1.153 yamt return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
256 1.153 yamt }
257 1.26 cgd
258 1.157 yamt /*
259 1.157 yamt * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
260 1.157 yamt * sleeping for at least seven times the loadfactor will decay p_estcpu to
261 1.157 yamt * less than (1 << ESTCPU_SHIFT).
262 1.157 yamt *
263 1.157 yamt * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
264 1.157 yamt */
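/*
 * Illustrative check of the bound above: with loadavg == 1 the decay
 * factor is loadfac / (loadfac + FSCALE) == 2/3 and the cutoff used in
 * decay_cpu_batch() below is n >= 7 * 2 * loadavg == 14 seconds.
 * Since (2/3)^14 =~ 0.0034 < 1/255, an estcpu of (255 << ESTCPU_SHIFT)
 * does decay to below (1 << ESTCPU_SHIFT) once that cutoff is reached.
 */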
265 1.157 yamt static fixpt_t
266 1.157 yamt decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
267 1.157 yamt {
268 1.157 yamt
269 1.157 yamt if ((n << FSHIFT) >= 7 * loadfac) {
270 1.157 yamt return 0;
271 1.157 yamt }
272 1.157 yamt
273 1.157 yamt while (estcpu != 0 && n > 1) {
274 1.157 yamt estcpu = decay_cpu(loadfac, estcpu);
275 1.157 yamt n--;
276 1.157 yamt }
277 1.157 yamt
278 1.157 yamt return estcpu;
279 1.157 yamt }
280 1.157 yamt
281 1.26 cgd /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
282 1.26 cgd fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
283 1.26 cgd
284 1.26 cgd /*
285 1.26 cgd * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
286 1.26 cgd * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
287 1.26 cgd * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
288 1.26 cgd *
289 1.26 cgd * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
290 1.26 cgd * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
291 1.26 cgd *
292 1.26 cgd * If you dont want to bother with the faster/more-accurate formula, you
293 1.26 cgd * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
294 1.26 cgd * (more general) method of calculating the %age of CPU used by a process.
295 1.26 cgd */
296 1.26 cgd #define CCPU_SHIFT 11
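/*
 * Sketch of where the fast path in schedcpu() comes from, assuming the
 * usual FSHIFT of 11 and a 100Hz stat clock: the exact update is
 *
 *	p_pctcpu += (1 - ccpu/FSCALE) * (p_cpticks * FSCALE / clkhz)
 *
 * and (1 - exp(-1/20)) / 100 =~ 0.000488 =~ 2^-11, so the increment
 * collapses to p_cpticks << (FSHIFT - CCPU_SHIFT).
 */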
297 1.26 cgd
298 1.26 cgd /*
299 1.166.2.7 ad * schedcpu:
300 1.166.2.7 ad *
301 1.166.2.7 ad * Recompute process priorities, every hz ticks.
302 1.166.2.7 ad *
303 1.166.2.7 ad * XXXSMP This needs to be reorganised in order to reduce the locking
304 1.166.2.7 ad * burden.
305 1.26 cgd */
306 1.26 cgd /* ARGSUSED */
307 1.26 cgd void
308 1.77 thorpej schedcpu(void *arg)
309 1.26 cgd {
310 1.71 augustss fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
311 1.166.2.2 ad struct rlimit *rlim;
312 1.122 thorpej struct lwp *l;
313 1.71 augustss struct proc *p;
314 1.166.2.11 ad int minslp, clkhz, sig;
315 1.166.2.2 ad long runtm;
316 1.26 cgd
317 1.157 yamt schedcpu_ticks++;
318 1.157 yamt
319 1.166.2.1 ad mutex_enter(&proclist_mutex);
320 1.145 yamt PROCLIST_FOREACH(p, &allproc) {
321 1.26 cgd /*
322 1.166.2.2 ad * Increment time in/out of memory and sleep time (if
323 1.166.2.2 ad * sleeping). We ignore overflow; with 16-bit int's
324 1.26 cgd * (remember them?) overflow takes 45 days.
325 1.26 cgd */
326 1.122 thorpej minslp = 2;
327 1.166.2.2 ad mutex_enter(&p->p_smutex);
328 1.166.2.10 yamt runtm = p->p_rtime.tv_sec;
329 1.122 thorpej LIST_FOREACH(l, &p->p_lwps, l_sibling) {
330 1.166.2.2 ad lwp_lock(l);
331 1.166.2.2 ad runtm += l->l_rtime.tv_sec;
332 1.122 thorpej l->l_swtime++;
333 1.130 nathanw if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
334 1.122 thorpej l->l_stat == LSSUSPENDED) {
335 1.122 thorpej l->l_slptime++;
336 1.122 thorpej minslp = min(minslp, l->l_slptime);
337 1.122 thorpej } else
338 1.122 thorpej minslp = 0;
339 1.166.2.2 ad lwp_unlock(l);
340 1.122 thorpej }
341 1.26 cgd p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
342 1.166.2.2 ad
343 1.166.2.2 ad /*
344 1.166.2.2 ad * Check if the process exceeds its CPU resource allocation.
345 1.166.2.7 ad * If over max, kill it.
346 1.166.2.2 ad */
347 1.166.2.2 ad rlim = &p->p_rlimit[RLIMIT_CPU];
348 1.166.2.11 ad sig = 0;
349 1.166.2.2 ad if (runtm >= rlim->rlim_cur) {
350 1.166.2.2 ad if (runtm >= rlim->rlim_max)
351 1.166.2.9 yamt sig = SIGKILL;
352 1.166.2.2 ad else {
353 1.166.2.9 yamt sig = SIGXCPU;
354 1.166.2.2 ad if (rlim->rlim_cur < rlim->rlim_max)
355 1.166.2.2 ad rlim->rlim_cur += 5;
356 1.166.2.2 ad }
357 1.166.2.2 ad }
358 1.166.2.7 ad
359 1.166.2.7 ad /*
360 1.166.2.7 ad * If the process has run for more than autonicetime, reduce
361 1.166.2.7 ad * priority to give others a chance.
362 1.166.2.7 ad */
363 1.166.2.2 ad if (autonicetime && runtm > autonicetime && p->p_nice == NZERO
364 1.166.2.2 ad && kauth_cred_geteuid(p->p_cred)) {
365 1.166.2.19 ad mutex_spin_enter(&p->p_stmutex);
366 1.166.2.2 ad p->p_nice = autoniceval + NZERO;
367 1.166.2.2 ad resetprocpriority(p);
368 1.166.2.19 ad mutex_spin_exit(&p->p_stmutex);
369 1.166.2.2 ad }
370 1.166.2.2 ad
371 1.26 cgd /*
372 1.26 cgd * If the process has slept the entire second,
373 1.26 cgd * stop recalculating its priority until it wakes up.
374 1.26 cgd */
375 1.166.2.11 ad if (minslp <= 1) {
376 1.166.2.11 ad /*
377 1.166.2.11 ad * p_pctcpu is only for ps.
378 1.166.2.11 ad */
379 1.166.2.19 ad mutex_spin_enter(&p->p_stmutex);
380 1.166.2.11 ad clkhz = stathz != 0 ? stathz : hz;
381 1.26 cgd #if (FSHIFT >= CCPU_SHIFT)
382 1.166.2.11 ad p->p_pctcpu += (clkhz == 100)?
383 1.166.2.11 ad ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
384 1.166.2.11 ad 100 * (((fixpt_t) p->p_cpticks)
385 1.166.2.11 ad << (FSHIFT - CCPU_SHIFT)) / clkhz;
386 1.26 cgd #else
387 1.166.2.11 ad p->p_pctcpu += ((FSCALE - ccpu) *
388 1.166.2.11 ad (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
389 1.26 cgd #endif
390 1.166.2.11 ad p->p_cpticks = 0;
391 1.166.2.11 ad p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
392 1.166.2.11 ad
393 1.166.2.11 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
394 1.166.2.11 ad lwp_lock(l);
395 1.166.2.20 ad if (l->l_slptime <= 1 &&
396 1.166.2.20 ad l->l_priority >= PUSER)
397 1.166.2.11 ad resetpriority(l);
398 1.166.2.11 ad lwp_unlock(l);
399 1.166.2.11 ad }
400 1.166.2.19 ad mutex_spin_exit(&p->p_stmutex);
401 1.26 cgd }
402 1.166.2.11 ad
403 1.166.2.2 ad mutex_exit(&p->p_smutex);
404 1.166.2.9 yamt if (sig) {
405 1.166.2.9 yamt psignal(p, sig);
406 1.166.2.9 yamt }
407 1.26 cgd }
408 1.166.2.1 ad mutex_exit(&proclist_mutex);
409 1.47 mrg uvm_meter();
410 1.67 fvdl wakeup((caddr_t)&lbolt);
411 1.143 yamt callout_schedule(&schedcpu_ch, hz);
412 1.26 cgd }
413 1.26 cgd
414 1.26 cgd /*
415 1.26 cgd * Recalculate the priority of a process after it has slept for a while.
416 1.26 cgd */
417 1.26 cgd void
418 1.122 thorpej updatepri(struct lwp *l)
419 1.26 cgd {
420 1.122 thorpej struct proc *p = l->l_proc;
421 1.83 thorpej fixpt_t loadfac;
422 1.83 thorpej
423 1.166.2.2 ad LOCK_ASSERT(lwp_locked(l, NULL));
424 1.157 yamt KASSERT(l->l_slptime > 1);
425 1.83 thorpej
426 1.83 thorpej loadfac = loadfactor(averunnable.ldavg[0]);
427 1.26 cgd
428 1.157 yamt l->l_slptime--; /* the first time was done in schedcpu */
429 1.157 yamt /* XXX NJWLWP */
430 1.166.2.13 ad /* XXXSMP occasionally unlocked, should be per-LWP */
431 1.157 yamt p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
432 1.122 thorpej resetpriority(l);
433 1.26 cgd }
434 1.26 cgd
435 1.26 cgd /*
436 1.166.2.2 ad * During autoconfiguration or after a panic, a sleep will simply lower the
437 1.166.2.2 ad * priority briefly to allow interrupts, then return. The priority to be
438 1.166.2.2 ad * used (safepri) is machine-dependent, thus this value is initialized and
439 1.166.2.2 ad * maintained in the machine-dependent layers. This priority will typically
440 1.166.2.2 ad * be 0, or the lowest priority that is safe for use on the interrupt stack;
441 1.166.2.2 ad * it can be made higher to block network software interrupts after panics.
442 1.26 cgd */
443 1.166.2.2 ad int safepri;
444 1.26 cgd
445 1.26 cgd /*
446 1.166.2.15 ad * OBSOLETE INTERFACE
447 1.166.2.15 ad *
448 1.166.2.2 ad * General sleep call. Suspends the current process until a wakeup is
449 1.166.2.2 ad * performed on the specified identifier. The process will then be made
450 1.166.2.2 ad * runnable with the specified priority. Sleeps at most timo/hz seconds (0
451 1.166.2.2 ad * means no timeout). If pri includes PCATCH flag, signals are checked
452 1.166.2.2 ad * before and after sleeping, else signals are not checked. Returns 0 if
453 1.166.2.2 ad * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
454 1.166.2.2 ad * signal needs to be delivered, ERESTART is returned if the current system
455 1.166.2.2 ad * call should be restarted if possible, and EINTR is returned if the system
   456 1.166.2.2       ad  * call should be interrupted by the signal.
457 1.166.2.2 ad *
458 1.166.2.2 ad * The interlock is held until we are on a sleep queue. The interlock will
459 1.166.2.2 ad * be locked before returning back to the caller unless the PNORELOCK flag
460 1.166.2.2 ad * is specified, in which case the interlock will always be unlocked upon
461 1.166.2.2 ad * return.
462 1.166.2.2 ad */
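/*
 * Illustrative calling pattern (the softc and its fields below are
 * hypothetical):
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "xcbusy",
 *		    0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 *
 * The interlock is dropped once the LWP is on the sleep queue and is
 * reacquired before ltsleep() returns, so the loop re-tests sc_busy
 * with the lock held.
 */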
463 1.166.2.1 ad int
464 1.166.2.15 ad ltsleep(wchan_t ident, int priority, const char *wmesg, int timo,
465 1.166.2.15 ad volatile struct simplelock *interlock)
466 1.166.2.1 ad {
467 1.166.2.1 ad struct lwp *l = curlwp;
468 1.166.2.2 ad sleepq_t *sq;
469 1.166.2.7 ad int error, catch;
470 1.166.2.1 ad
471 1.166.2.15 ad if (sleepq_dontsleep(l)) {
472 1.166.2.15 ad (void)sleepq_abort(NULL, 0);
473 1.166.2.15 ad if ((priority & PNORELOCK) != 0)
474 1.166.2.15 ad simple_unlock(interlock);
475 1.166.2.15 ad return 0;
476 1.166.2.15 ad }
477 1.166.2.1 ad
478 1.166.2.4 ad sq = sleeptab_lookup(&sleeptab, ident);
479 1.166.2.7 ad sleepq_enter(sq, l);
480 1.166.2.1 ad
481 1.166.2.15 ad if (interlock != NULL) {
482 1.166.2.15 ad LOCK_ASSERT(simple_lock_held(interlock));
483 1.166.2.15 ad simple_unlock(interlock);
484 1.166.2.1 ad }
485 1.166.2.1 ad
486 1.166.2.7 ad catch = priority & PCATCH;
487 1.166.2.7 ad sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
488 1.166.2.7 ad &sleep_syncobj);
489 1.166.2.7 ad error = sleepq_unblock(timo, catch);
490 1.166.2.1 ad
491 1.166.2.15 ad if (interlock != NULL && (priority & PNORELOCK) == 0)
492 1.166.2.15 ad simple_lock(interlock);
493 1.166.2.2 ad
494 1.166.2.2 ad return error;
495 1.166.2.1 ad }
496 1.166.2.1 ad
497 1.166.2.7 ad /*
498 1.166.2.15 ad * General sleep call for situations where a wake-up is not expected.
499 1.166.2.7 ad */
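/*
 * For example, code that simply needs to stall for roughly one second,
 * with no associated wakeup and no mutex held, might do (illustrative
 * only):
 *
 *	(void)kpause("xcdelay", FALSE, hz, NULL);
 */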
500 1.166.2.7 ad int
501 1.166.2.15 ad kpause(const char *wmesg, boolean_t intr, int timo, kmutex_t *mtx)
502 1.166.2.7 ad {
503 1.166.2.7 ad struct lwp *l = curlwp;
504 1.166.2.7 ad sleepq_t *sq;
505 1.166.2.15 ad int error;
506 1.166.2.7 ad
507 1.166.2.7 ad if (sleepq_dontsleep(l))
508 1.166.2.7 ad return sleepq_abort(NULL, 0);
509 1.166.2.7 ad
510 1.166.2.16 ad if (mtx != NULL)
511 1.166.2.16 ad mutex_exit(mtx);
512 1.166.2.7 ad sq = sleeptab_lookup(&sleeptab, l);
513 1.166.2.7 ad sleepq_enter(sq, l);
514 1.166.2.7 ad sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
515 1.166.2.15 ad error = sleepq_unblock(timo, intr);
516 1.166.2.16 ad if (mtx != NULL)
517 1.166.2.16 ad mutex_enter(mtx);
518 1.166.2.15 ad
519 1.166.2.15 ad return error;
520 1.166.2.7 ad }
521 1.166.2.7 ad
522 1.26 cgd /*
523 1.166.2.15 ad * OBSOLETE INTERFACE
524 1.166.2.15 ad *
525 1.26 cgd * Make all processes sleeping on the specified identifier runnable.
526 1.26 cgd */
527 1.26 cgd void
528 1.166.2.2 ad wakeup(wchan_t ident)
529 1.26 cgd {
530 1.166.2.2 ad sleepq_t *sq;
531 1.83 thorpej
532 1.166.2.2 ad if (cold)
533 1.166.2.2 ad return;
534 1.83 thorpej
535 1.166.2.4 ad sq = sleeptab_lookup(&sleeptab, ident);
536 1.166.2.5 ad sleepq_wake(sq, ident, (u_int)-1);
537 1.63 thorpej }
538 1.63 thorpej
539 1.63 thorpej /*
540 1.166.2.15 ad * OBSOLETE INTERFACE
541 1.166.2.15 ad *
542 1.63 thorpej * Make the highest priority process first in line on the specified
543 1.63 thorpej * identifier runnable.
544 1.63 thorpej */
545 1.166.2.2 ad void
546 1.166.2.2 ad wakeup_one(wchan_t ident)
547 1.63 thorpej {
548 1.166.2.2 ad sleepq_t *sq;
549 1.77 thorpej
550 1.166.2.2 ad if (cold)
551 1.166.2.2 ad return;
552 1.166.2.2 ad
553 1.166.2.4 ad sq = sleeptab_lookup(&sleeptab, ident);
554 1.166.2.5 ad sleepq_wake(sq, ident, 1);
555 1.117 gmcgarry }
556 1.117 gmcgarry
557 1.166.2.2 ad
558 1.117 gmcgarry /*
559 1.117 gmcgarry * General yield call. Puts the current process back on its run queue and
560 1.117 gmcgarry * performs a voluntary context switch. Should only be called when the
561 1.117 gmcgarry * current process explicitly requests it (eg sched_yield(2) in compat code).
562 1.117 gmcgarry */
563 1.117 gmcgarry void
564 1.117 gmcgarry yield(void)
565 1.117 gmcgarry {
566 1.122 thorpej struct lwp *l = curlwp;
567 1.117 gmcgarry
568 1.166.2.18 ad KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
569 1.166.2.2 ad lwp_lock(l);
570 1.166.2.2 ad if (l->l_stat == LSONPROC) {
571 1.166.2.3 ad KASSERT(lwp_locked(l, &sched_mutex));
572 1.166.2.2 ad l->l_priority = l->l_usrpri;
573 1.166.2.2 ad }
574 1.166.2.2 ad l->l_nvcsw++;
575 1.122 thorpej mi_switch(l, NULL);
576 1.166.2.18 ad KERNEL_LOCK(l->l_biglocks, l);
577 1.69 thorpej }
578 1.69 thorpej
579 1.69 thorpej /*
580 1.69 thorpej * General preemption call. Puts the current process back on its run queue
581 1.156 rpaulo * and performs an involuntary context switch.
582 1.69 thorpej */
583 1.69 thorpej void
584 1.166.2.17 ad preempt(void)
585 1.69 thorpej {
586 1.122 thorpej struct lwp *l = curlwp;
587 1.69 thorpej
588 1.166.2.18 ad KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
589 1.166.2.2 ad lwp_lock(l);
590 1.166.2.2 ad if (l->l_stat == LSONPROC) {
591 1.166.2.3 ad KASSERT(lwp_locked(l, &sched_mutex));
592 1.166.2.2 ad l->l_priority = l->l_usrpri;
593 1.166.2.2 ad }
594 1.166.2.2 ad l->l_nivcsw++;
595 1.166.2.17 ad (void)mi_switch(l, NULL);
596 1.166.2.18 ad KERNEL_LOCK(l->l_biglocks, l);
597 1.69 thorpej }
598 1.69 thorpej
599 1.69 thorpej /*
   600 1.166.2.2       ad  * The machine-independent parts of context switch.  Switch to "newl"
   601 1.166.2.2       ad  * if non-NULL, otherwise let cpu_switch() choose the next LWP.
602 1.130 nathanw *
603 1.122 thorpej * Returns 1 if another process was actually run.
604 1.26 cgd */
605 1.122 thorpej int
606 1.122 thorpej mi_switch(struct lwp *l, struct lwp *newl)
607 1.26 cgd {
608 1.76 thorpej struct schedstate_percpu *spc;
609 1.26 cgd struct timeval tv;
610 1.166.2.2 ad int retval, oldspl;
611 1.166.2.2 ad long s, u;
612 1.166.2.2 ad #if PERFCTRS
613 1.122 thorpej struct proc *p = l->l_proc;
614 1.166.2.2 ad #endif
615 1.26 cgd
616 1.166.2.2 ad LOCK_ASSERT(lwp_locked(l, NULL));
617 1.83 thorpej
618 1.160 chs #ifdef LOCKDEBUG
619 1.82 thorpej spinlock_switchcheck();
620 1.81 thorpej simple_lock_switchcheck();
621 1.50 fvdl #endif
622 1.166.2.2 ad #ifdef KSTACK_CHECK_MAGIC
623 1.166.2.2 ad kstack_check_magic(l);
624 1.166.2.2 ad #endif
625 1.166.2.2 ad
626 1.166.2.2 ad /*
627 1.166.2.2 ad * It's safe to read the per CPU schedstate unlocked here, as all we
   628 1.166.2.2       ad 	 * are after is the run time and that's guaranteed to have been last
629 1.166.2.2 ad * updated by this CPU.
630 1.166.2.2 ad */
631 1.166.2.2 ad KDASSERT(l->l_cpu == curcpu());
632 1.166.2.2 ad spc = &l->l_cpu->ci_schedstate;
633 1.81 thorpej
634 1.26 cgd /*
635 1.26 cgd * Compute the amount of time during which the current
636 1.113 gmcgarry * process was running.
637 1.26 cgd */
638 1.26 cgd microtime(&tv);
639 1.166.2.2 ad u = l->l_rtime.tv_usec +
640 1.122 thorpej (tv.tv_usec - spc->spc_runtime.tv_usec);
641 1.166.2.2 ad s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
642 1.26 cgd if (u < 0) {
643 1.26 cgd u += 1000000;
644 1.26 cgd s--;
645 1.26 cgd } else if (u >= 1000000) {
646 1.26 cgd u -= 1000000;
647 1.26 cgd s++;
648 1.26 cgd }
649 1.166.2.2 ad l->l_rtime.tv_usec = u;
650 1.166.2.2 ad l->l_rtime.tv_sec = s;
651 1.26 cgd
652 1.26 cgd /*
653 1.166.2.2 ad * XXXSMP If we are using h/w performance counters, save context.
654 1.26 cgd */
655 1.166.2.2 ad #if PERFCTRS
656 1.166.2.2 ad if (PMC_ENABLED(p)) {
657 1.166.2.2 ad pmc_save_context(p);
658 1.26 cgd }
659 1.166.2.2 ad #endif
660 1.166.2.2 ad
661 1.166.2.2 ad /*
662 1.166.2.2 ad * Acquire the sched_mutex if necessary. It will be released by
663 1.166.2.2 ad * cpu_switch once it has decided to idle, or picked another LWP
664 1.166.2.2 ad * to run.
665 1.166.2.2 ad */
666 1.166.2.4 ad #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
667 1.166.2.4 ad if (l->l_mutex != &sched_mutex) {
668 1.166.2.14 ad mutex_spin_enter(&sched_mutex);
669 1.166.2.4 ad lwp_unlock(l);
670 1.26 cgd }
671 1.166.2.3 ad #endif
672 1.166.2.3 ad
673 1.166.2.3 ad /*
674 1.166.2.3 ad * If on the CPU and we have gotten this far, then we must yield.
675 1.166.2.3 ad */
676 1.166.2.3 ad KASSERT(l->l_stat != LSRUN);
677 1.166.2.3 ad if (l->l_stat == LSONPROC) {
678 1.166.2.4 ad KASSERT(lwp_locked(l, &sched_mutex));
679 1.166.2.3 ad l->l_stat = LSRUN;
680 1.166.2.3 ad setrunqueue(l);
681 1.166.2.3 ad }
682 1.166.2.2 ad uvmexp.swtch++;
683 1.69 thorpej
684 1.69 thorpej /*
685 1.69 thorpej * Process is about to yield the CPU; clear the appropriate
686 1.69 thorpej * scheduling flags.
687 1.69 thorpej */
688 1.73 thorpej spc->spc_flags &= ~SPCF_SWITCHCLEAR;
689 1.109 yamt
690 1.166.2.2 ad LOCKDEBUG_BARRIER(&sched_mutex, 1);
691 1.113 gmcgarry
692 1.113 gmcgarry /*
693 1.166.2.2 ad * Switch to the new current LWP. When we run again, we'll
694 1.166.2.2 ad * return back here.
695 1.113 gmcgarry */
696 1.166.2.4 ad oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
697 1.166.2.4 ad
698 1.166.2.4 ad if (newl == NULL || newl->l_back == NULL)
699 1.122 thorpej retval = cpu_switch(l, NULL);
700 1.166.2.2 ad else {
701 1.166.2.4 ad KASSERT(lwp_locked(newl, &sched_mutex));
702 1.122 thorpej remrunqueue(newl);
703 1.122 thorpej cpu_switchto(l, newl);
704 1.122 thorpej retval = 0;
705 1.122 thorpej }
706 1.110 briggs
707 1.110 briggs /*
708 1.166.2.2 ad * XXXSMP If we are using h/w performance counters, restore context.
709 1.26 cgd */
710 1.114 gmcgarry #if PERFCTRS
711 1.166 christos if (PMC_ENABLED(p)) {
712 1.114 gmcgarry pmc_restore_context(p);
713 1.166 christos }
714 1.114 gmcgarry #endif
715 1.110 briggs
716 1.110 briggs /*
717 1.76 thorpej * We're running again; record our new start time. We might
718 1.166.2.2 ad * be running on a new CPU now, so don't use the cached
719 1.76 thorpej * schedstate_percpu pointer.
720 1.76 thorpej */
721 1.122 thorpej KDASSERT(l->l_cpu == curcpu());
722 1.122 thorpej microtime(&l->l_cpu->ci_schedstate.spc_runtime);
723 1.166.2.2 ad splx(oldspl);
724 1.122 thorpej
725 1.122 thorpej return retval;
726 1.26 cgd }
727 1.26 cgd
728 1.26 cgd /*
729 1.26 cgd * Initialize the (doubly-linked) run queues
730 1.26 cgd * to be empty.
731 1.26 cgd */
732 1.26 cgd void
   733 1.26       cgd rqinit(void)
734 1.26 cgd {
735 1.71 augustss int i;
736 1.26 cgd
737 1.73 thorpej for (i = 0; i < RUNQUE_NQS; i++)
738 1.73 thorpej sched_qs[i].ph_link = sched_qs[i].ph_rlink =
739 1.122 thorpej (struct lwp *)&sched_qs[i];
740 1.166.2.2 ad
741 1.166.2.2 ad mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
742 1.26 cgd }
743 1.26 cgd
744 1.158 perry static inline void
745 1.166.2.2 ad resched_lwp(struct lwp *l, u_char pri)
746 1.119 thorpej {
747 1.119 thorpej struct cpu_info *ci;
748 1.119 thorpej
749 1.119 thorpej /*
750 1.119 thorpej * XXXSMP
751 1.122 thorpej * Since l->l_cpu persists across a context switch,
752 1.119 thorpej * this gives us *very weak* processor affinity, in
753 1.119 thorpej * that we notify the CPU on which the process last
754 1.119 thorpej * ran that it should try to switch.
755 1.119 thorpej *
756 1.119 thorpej * This does not guarantee that the process will run on
757 1.119 thorpej * that processor next, because another processor might
758 1.119 thorpej * grab it the next time it performs a context switch.
759 1.119 thorpej *
760 1.119 thorpej * This also does not handle the case where its last
761 1.119 thorpej * CPU is running a higher-priority process, but every
762 1.119 thorpej * other CPU is running a lower-priority process. There
763 1.119 thorpej * are ways to handle this situation, but they're not
764 1.119 thorpej * currently very pretty, and we also need to weigh the
765 1.119 thorpej * cost of moving a process from one CPU to another.
766 1.166.2.7 ad *
767 1.166.2.7 ad * XXXSMP
768 1.166.2.7 ad * There is also the issue of locking the other CPU's
769 1.166.2.7 ad * sched state, which we currently do not do.
770 1.119 thorpej */
771 1.122 thorpej ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
772 1.121 thorpej if (pri < ci->ci_schedstate.spc_curpriority)
773 1.166.2.2 ad cpu_need_resched(ci);
774 1.119 thorpej }
775 1.119 thorpej
776 1.26 cgd /*
777 1.166.2.2 ad * Change process state to be runnable, placing it on the run queue if it is
778 1.166.2.2 ad * in memory, and awakening the swapper if it isn't in memory.
779 1.166.2.2 ad *
780 1.166.2.2 ad * Call with the process and LWP locked. Will return with the LWP unlocked.
781 1.26 cgd */
782 1.26 cgd void
783 1.122 thorpej setrunnable(struct lwp *l)
784 1.26 cgd {
785 1.122 thorpej struct proc *p = l->l_proc;
786 1.166.2.18 ad sigset_t *ss;
787 1.26 cgd
788 1.166.2.2 ad LOCK_ASSERT(mutex_owned(&p->p_smutex));
789 1.166.2.2 ad LOCK_ASSERT(lwp_locked(l, NULL));
790 1.83 thorpej
791 1.122 thorpej switch (l->l_stat) {
792 1.122 thorpej case LSSTOP:
793 1.33 mycroft /*
794 1.33 mycroft * If we're being traced (possibly because someone attached us
795 1.33 mycroft * while we were stopped), check for a signal from the debugger.
796 1.33 mycroft */
797 1.166.2.4 ad if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
798 1.166.2.18 ad if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
799 1.166.2.18 ad ss = &l->l_sigpend.sp_set;
800 1.166.2.18 ad else
801 1.166.2.18 ad ss = &p->p_sigpend.sp_set;
802 1.166.2.18 ad sigaddset(ss, p->p_xstat);
803 1.166.2.2 ad signotify(l);
804 1.53 mycroft }
805 1.166.2.2 ad p->p_nrlwps++;
806 1.122 thorpej break;
807 1.122 thorpej case LSSUSPENDED:
808 1.166.2.4 ad l->l_flag &= ~L_WSUSPEND;
809 1.166.2.2 ad p->p_nrlwps++;
810 1.166.2.2 ad break;
811 1.166.2.2 ad case LSSLEEP:
812 1.166.2.4 ad KASSERT(l->l_wchan != NULL);
813 1.166.2.7 ad break;
814 1.166.2.7 ad default:
815 1.166.2.7 ad panic("setrunnable: lwp %p state was %d", l, l->l_stat);
816 1.166.2.7 ad }
817 1.166.2.2 ad
818 1.166.2.7 ad /*
   819 1.166.2.7       ad 	 * If the LWP was sleeping interruptibly, then it's OK to start it
820 1.166.2.7 ad * again. If not, mark it as still sleeping.
821 1.166.2.7 ad */
822 1.166.2.7 ad if (l->l_wchan != NULL) {
823 1.166.2.7 ad l->l_stat = LSSLEEP;
824 1.166.2.7 ad if ((l->l_flag & L_SINTR) != 0)
825 1.166.2.4 ad lwp_unsleep(l);
826 1.166.2.7 ad else {
827 1.166.2.4 ad lwp_unlock(l);
828 1.166.2.4 ad #ifdef DIAGNOSTIC
829 1.166.2.4 ad panic("setrunnable: !L_SINTR");
830 1.166.2.4 ad #endif
831 1.166.2.4 ad }
832 1.166.2.2 ad return;
833 1.26 cgd }
834 1.139 cl
835 1.166.2.7 ad LOCK_ASSERT(lwp_locked(l, &sched_mutex));
836 1.166.2.7 ad
837 1.166.2.3 ad /*
838 1.166.2.7 ad * If the LWP is still on the CPU, mark it as LSONPROC. It may be
839 1.166.2.7 ad * about to call mi_switch(), in which case it will yield.
840 1.166.2.7 ad *
841 1.166.2.7 ad * XXXSMP Will need to change for preemption.
842 1.166.2.3 ad */
843 1.166.2.7 ad #ifdef MULTIPROCESSOR
844 1.166.2.7 ad if (l->l_cpu->ci_curlwp == l) {
845 1.166.2.7 ad #else
846 1.166.2.7 ad if (l == curlwp) {
847 1.166.2.7 ad #endif
848 1.166.2.3 ad l->l_stat = LSONPROC;
849 1.166.2.3 ad l->l_slptime = 0;
850 1.166.2.3 ad lwp_unlock(l);
851 1.166.2.3 ad return;
852 1.166.2.3 ad }
853 1.122 thorpej
854 1.166.2.3 ad /*
855 1.166.2.3 ad * Set the LWP runnable. If it's swapped out, we need to wake the swapper
856 1.166.2.3 ad * to bring it back in. Otherwise, enter it into a run queue.
857 1.166.2.3 ad */
858 1.122 thorpej if (l->l_slptime > 1)
859 1.122 thorpej updatepri(l);
860 1.166.2.7 ad l->l_stat = LSRUN;
861 1.122 thorpej l->l_slptime = 0;
862 1.166.2.2 ad
863 1.166.2.2 ad if (l->l_flag & L_INMEM) {
864 1.166.2.2 ad setrunqueue(l);
865 1.166.2.2 ad resched_lwp(l, l->l_priority);
866 1.166.2.2 ad lwp_unlock(l);
867 1.166.2.2 ad } else {
868 1.166.2.2 ad lwp_unlock(l);
869 1.166.2.2 ad wakeup(&proc0);
870 1.166.2.2 ad }
871 1.26 cgd }
872 1.26 cgd
873 1.26 cgd /*
874 1.26 cgd * Compute the priority of a process when running in user mode.
875 1.26 cgd * Arrange to reschedule if the resulting priority is better
876 1.26 cgd * than that of the current process.
877 1.26 cgd */
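/*
 * For example (assuming PUSER == 50 and NZERO == 20): an LWP whose
 * process has p_estcpu == (10 << ESTCPU_SHIFT) and p_nice == 25 gets
 *
 *	newpriority = 50 + 10 + NICE_WEIGHT * (25 - 20) == 70,
 *
 * which is then clamped to MAXPRI if necessary.
 */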
878 1.26 cgd void
879 1.122 thorpej resetpriority(struct lwp *l)
880 1.26 cgd {
881 1.71 augustss unsigned int newpriority;
882 1.122 thorpej struct proc *p = l->l_proc;
883 1.26 cgd
884 1.166.2.13 ad /* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
885 1.166.2.2 ad LOCK_ASSERT(lwp_locked(l, NULL));
886 1.83 thorpej
887 1.166.2.11 ad if ((l->l_flag & L_SYSTEM) != 0)
888 1.166.2.11 ad return;
889 1.166.2.11 ad
890 1.153 yamt newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
891 1.166.2.11 ad NICE_WEIGHT * (p->p_nice - NZERO);
892 1.26 cgd newpriority = min(newpriority, MAXPRI);
893 1.166.2.20 ad lwp_changepri(l, newpriority);
894 1.122 thorpej }
895 1.122 thorpej
896 1.130 nathanw /*
897 1.122 thorpej * Recompute priority for all LWPs in a process.
898 1.122 thorpej */
899 1.122 thorpej void
900 1.122 thorpej resetprocpriority(struct proc *p)
901 1.122 thorpej {
902 1.122 thorpej struct lwp *l;
903 1.122 thorpej
904 1.166.2.13 ad LOCK_ASSERT(mutex_owned(&p->p_stmutex));
905 1.166.2.2 ad
906 1.166.2.2 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
907 1.166.2.2 ad lwp_lock(l);
908 1.166.2.2 ad resetpriority(l);
909 1.166.2.2 ad lwp_unlock(l);
910 1.166.2.2 ad }
911 1.55 ross }
912 1.55 ross
913 1.55 ross /*
914 1.56 ross * We adjust the priority of the current process. The priority of a process
915 1.141 wiz * gets worse as it accumulates CPU time. The CPU usage estimator (p_estcpu)
916 1.56 ross * is increased here. The formula for computing priorities (in kern_synch.c)
917 1.56 ross * will compute a different value each time p_estcpu increases. This can
918 1.56 ross * cause a switch, but unless the priority crosses a PPQ boundary the actual
919 1.141 wiz * queue will not change. The CPU usage estimator ramps up quite quickly
920 1.56 ross * when the process is running (linearly), and decays away exponentially, at
921 1.56 ross * a rate which is proportionally slower when the system is busy. The basic
922 1.80 nathanw * principle is that the system will 90% forget that the process used a lot
923 1.56 ross * of CPU time in 5 * loadav seconds. This causes the system to favor
924 1.56 ross * processes which haven't run much recently, and to round-robin among other
925 1.56 ross * processes.
926 1.55 ross */
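/*
 * Concretely: each schedclock() call adds (1 << ESTCPU_SHIFT) to
 * p_estcpu, which worsens the priority computed by resetpriority() by
 * one step; with PPQ == 4 the LWP only moves to a different run queue
 * after four such increments.
 */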
927 1.55 ross
928 1.55 ross void
929 1.122 thorpej schedclock(struct lwp *l)
930 1.55 ross {
931 1.122 thorpej struct proc *p = l->l_proc;
932 1.166.2.2 ad
933 1.166.2.19 ad mutex_spin_enter(&p->p_stmutex);
934 1.153 yamt p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
935 1.166.2.2 ad lwp_lock(l);
936 1.166.2.2 ad resetpriority(l);
937 1.166.2.19 ad mutex_spin_exit(&p->p_stmutex);
938 1.166.2.11 ad if ((l->l_flag & L_SYSTEM) == 0 && l->l_priority >= PUSER)
939 1.122 thorpej l->l_priority = l->l_usrpri;
940 1.166.2.2 ad lwp_unlock(l);
941 1.26 cgd }
942 1.94 bouyer
943 1.166.2.2 ad /*
944 1.166.2.2 ad * suspendsched:
945 1.166.2.2 ad *
946 1.166.2.4 ad * Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
947 1.166.2.2 ad */
948 1.94 bouyer void
949 1.166.2.4 ad suspendsched(void)
950 1.94 bouyer {
951 1.166.2.7 ad #ifdef MULTIPROCESSOR
952 1.166.2.4 ad CPU_INFO_ITERATOR cii;
953 1.166.2.4 ad struct cpu_info *ci;
954 1.166.2.7 ad #endif
955 1.122 thorpej struct lwp *l;
956 1.166.2.2 ad struct proc *p;
957 1.94 bouyer
958 1.166.2.4 ad /*
   959 1.166.2.4       ad 	 * We do this process by process so as not to violate the locking rules.
960 1.166.2.4 ad */
961 1.166.2.7 ad mutex_enter(&proclist_mutex);
962 1.166.2.4 ad PROCLIST_FOREACH(p, &allproc) {
963 1.166.2.4 ad mutex_enter(&p->p_smutex);
964 1.166.2.4 ad
965 1.166.2.4 ad if ((p->p_flag & P_SYSTEM) != 0) {
966 1.166.2.4 ad mutex_exit(&p->p_smutex);
967 1.94 bouyer continue;
968 1.166.2.4 ad }
969 1.122 thorpej
970 1.166.2.4 ad p->p_stat = SSTOP;
971 1.166.2.2 ad
972 1.166.2.4 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
973 1.166.2.4 ad if (l == curlwp)
974 1.166.2.4 ad continue;
975 1.166.2.4 ad
976 1.166.2.4 ad lwp_lock(l);
977 1.166.2.4 ad
978 1.166.2.4 ad /*
979 1.166.2.4 ad * Set L_WREBOOT so that the LWP will suspend itself
980 1.166.2.4 ad * when it tries to return to user mode. We want to
   981 1.166.2.4       ad 			 * get as many LWPs as possible to
982 1.166.2.4 ad * the user / kernel boundary, so that they will
983 1.166.2.4 ad * release any locks that they hold.
984 1.166.2.4 ad */
985 1.166.2.4 ad l->l_flag |= (L_WREBOOT | L_WSUSPEND);
986 1.166.2.4 ad
987 1.166.2.4 ad if (l->l_stat == LSSLEEP &&
988 1.166.2.4 ad (l->l_flag & L_SINTR) != 0) {
989 1.166.2.4 ad /* setrunnable() will release the lock. */
990 1.166.2.4 ad setrunnable(l);
991 1.166.2.4 ad continue;
992 1.166.2.4 ad }
993 1.166.2.4 ad
994 1.166.2.4 ad lwp_unlock(l);
995 1.94 bouyer }
996 1.166.2.4 ad
997 1.166.2.4 ad mutex_exit(&p->p_smutex);
998 1.94 bouyer }
999 1.166.2.7 ad mutex_exit(&proclist_mutex);
1000 1.166.2.4 ad
1001 1.166.2.4 ad /*
1002 1.166.2.4 ad * Kick all CPUs to make them preempt any LWPs running in user mode.
1003 1.166.2.4 ad * They'll trap into the kernel and suspend themselves in userret().
1004 1.166.2.4 ad */
1005 1.166.2.4 ad sched_lock(0);
1006 1.166.2.7 ad #ifdef MULTIPROCESSOR
1007 1.166.2.4 ad for (CPU_INFO_FOREACH(cii, ci))
1008 1.166.2.4 ad cpu_need_resched(ci);
1009 1.166.2.7 ad #else
1010 1.166.2.7 ad cpu_need_resched(curcpu());
1011 1.166.2.7 ad #endif
1012 1.166.2.4 ad sched_unlock(0);
1013 1.94 bouyer }
1014 1.113 gmcgarry
1015 1.113 gmcgarry /*
1016 1.151 yamt * scheduler_fork_hook:
1017 1.151 yamt *
1018 1.151 yamt * Inherit the parent's scheduler history.
1019 1.151 yamt */
1020 1.151 yamt void
1021 1.151 yamt scheduler_fork_hook(struct proc *parent, struct proc *child)
1022 1.151 yamt {
1023 1.151 yamt
1024 1.166.2.4 ad LOCK_ASSERT(mutex_owned(&parent->p_smutex));
1025 1.166.2.4 ad
1026 1.157 yamt child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
1027 1.157 yamt child->p_forktime = schedcpu_ticks;
1028 1.151 yamt }
1029 1.151 yamt
1030 1.151 yamt /*
1031 1.151 yamt * scheduler_wait_hook:
1032 1.151 yamt *
1033 1.151 yamt * Chargeback parents for the sins of their children.
1034 1.151 yamt */
1035 1.151 yamt void
1036 1.151 yamt scheduler_wait_hook(struct proc *parent, struct proc *child)
1037 1.151 yamt {
1038 1.157 yamt fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
1039 1.157 yamt fixpt_t estcpu;
1040 1.151 yamt
1041 1.151 yamt /* XXX Only if parent != init?? */
1042 1.157 yamt
1043 1.166.2.19 ad mutex_spin_enter(&parent->p_stmutex);
1044 1.157 yamt estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
1045 1.157 yamt schedcpu_ticks - child->p_forktime);
1046 1.166.2.2 ad if (child->p_estcpu > estcpu)
1047 1.157 yamt parent->p_estcpu =
1048 1.157 yamt ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
1049 1.166.2.19 ad mutex_spin_exit(&parent->p_stmutex);
1050 1.151 yamt }
1051 1.151 yamt
1052 1.151 yamt /*
1053 1.166.2.4 ad * sched_kpri:
1054 1.166.2.4 ad *
1055 1.166.2.11 ad * Scale a priority level to a kernel priority level, usually
1056 1.166.2.11 ad * for an LWP that is about to sleep.
1057 1.166.2.1 ad */
1058 1.166.2.1 ad int
1059 1.166.2.1 ad sched_kpri(struct lwp *l)
1060 1.166.2.1 ad {
1061 1.166.2.20 ad /*
1062 1.166.2.20 ad * Scale user priorities (127 -> 50) up to kernel priorities
1063 1.166.2.20 ad * in the range (49 -> 8). Reserve the top 8 kernel priorities
1064 1.166.2.20 ad * for high priority kthreads. Kernel priorities passed in
1065 1.166.2.20 ad * are left "as is". XXX This is somewhat arbitrary.
1066 1.166.2.20 ad */
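	/*
	 * For example, with the table below a user priority of 50 maps to
	 * kernel priority 8 and 127 maps to 49, while values already below
	 * 50 are returned unchanged.
	 */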
1067 1.166.2.6 ad static const uint8_t kpri_tab[] = {
1068 1.166.2.11 ad 0, 1, 2, 3, 4, 5, 6, 7,
1069 1.166.2.11 ad 8, 9, 10, 11, 12, 13, 14, 15,
1070 1.166.2.11 ad 16, 17, 18, 19, 20, 21, 22, 23,
1071 1.166.2.11 ad 24, 25, 26, 27, 28, 29, 30, 31,
1072 1.166.2.11 ad 32, 33, 34, 35, 36, 37, 38, 39,
1073 1.166.2.11 ad 40, 41, 42, 43, 44, 45, 46, 47,
1074 1.166.2.11 ad 48, 49, 8, 8, 9, 9, 10, 10,
1075 1.166.2.11 ad 11, 11, 12, 12, 13, 14, 14, 15,
1076 1.166.2.11 ad 15, 16, 16, 17, 17, 18, 18, 19,
1077 1.166.2.11 ad 20, 20, 21, 21, 22, 22, 23, 23,
1078 1.166.2.11 ad 24, 24, 25, 26, 26, 27, 27, 28,
1079 1.166.2.11 ad 28, 29, 29, 30, 30, 31, 32, 32,
1080 1.166.2.11 ad 33, 33, 34, 34, 35, 35, 36, 36,
1081 1.166.2.11 ad 37, 38, 38, 39, 39, 40, 40, 41,
1082 1.166.2.11 ad 41, 42, 42, 43, 44, 44, 45, 45,
1083 1.166.2.12 ad 46, 46, 47, 47, 48, 48, 49, 49,
1084 1.166.2.4 ad };
1085 1.166.2.4 ad
1086 1.166.2.20 ad return kpri_tab[l->l_usrpri];
1087 1.166.2.1 ad }
1088 1.166.2.1 ad
1089 1.166.2.4 ad /*
1090 1.166.2.4 ad * sched_unsleep:
1091 1.166.2.4 ad *
  1092 1.166.2.4       ad  * This is called when the LWP has not been awoken normally but instead
1093 1.166.2.4 ad * interrupted: for example, if the sleep timed out. Because of this,
1094 1.166.2.4 ad * it's not a valid action for running or idle LWPs.
1095 1.166.2.4 ad */
1096 1.166.2.3 ad void
1097 1.166.2.4 ad sched_unsleep(struct lwp *l)
1098 1.166.2.3 ad {
1099 1.166.2.3 ad
1100 1.166.2.4 ad lwp_unlock(l);
1101 1.166.2.4 ad panic("sched_unsleep");
1102 1.166.2.3 ad }
1103 1.166.2.3 ad
1104 1.166.2.4 ad /*
1105 1.166.2.4 ad * sched_changepri:
1106 1.166.2.4 ad *
1107 1.166.2.4 ad * Adjust the priority of an LWP.
1108 1.166.2.4 ad */
1109 1.166.2.3 ad void
1110 1.166.2.4 ad sched_changepri(struct lwp *l, int pri)
1111 1.166.2.3 ad {
1112 1.166.2.3 ad
1113 1.166.2.7 ad LOCK_ASSERT(lwp_locked(l, &sched_mutex));
1114 1.166.2.7 ad
1115 1.166.2.20 ad l->l_usrpri = pri;
1116 1.166.2.20 ad
1117 1.166.2.20 ad if (l->l_priority < PUSER)
1118 1.166.2.20 ad return;
1119 1.166.2.7 ad if (l->l_stat != LSRUN || (l->l_flag & L_INMEM) == 0 ||
1120 1.166.2.12 ad (l->l_priority / PPQ) == (pri / PPQ)) {
1121 1.166.2.4 ad l->l_priority = pri;
1122 1.166.2.4 ad return;
1123 1.166.2.4 ad }
1124 1.166.2.4 ad
1125 1.166.2.4 ad remrunqueue(l);
1126 1.166.2.4 ad l->l_priority = pri;
1127 1.166.2.4 ad setrunqueue(l);
1128 1.166.2.7 ad resched_lwp(l, pri);
1129 1.166.2.3 ad }
1130 1.166.2.3 ad
1131 1.166.2.1 ad /*
1132 1.113 gmcgarry * Low-level routines to access the run queue. Optimised assembler
1133 1.113 gmcgarry * routines can override these.
1134 1.113 gmcgarry */
1135 1.113 gmcgarry
1136 1.113 gmcgarry #ifndef __HAVE_MD_RUNQUEUE
1137 1.115 nisimura
1138 1.130 nathanw /*
  1139 1.134     matt  * On some architectures, it's faster to use an MSB ordering for the priorities
1140 1.134 matt * than the traditional LSB ordering.
1141 1.134 matt */
1142 1.134 matt #ifdef __HAVE_BIGENDIAN_BITOPS
1143 1.134 matt #define RQMASK(n) (0x80000000 >> (n))
1144 1.134 matt #else
1145 1.134 matt #define RQMASK(n) (0x00000001 << (n))
1146 1.134 matt #endif
1147 1.134 matt
1148 1.134 matt /*
  1149 1.115 nisimura  * The primitives that manipulate the run queues.  sched_whichqs tells which
  1150 1.115 nisimura  * of the 32 queues sched_qs have LWPs in them.  setrunqueue() puts LWPs
  1151 1.115 nisimura  * into queues, remrunqueue() removes them.  The running LWP is on no queue;
  1152 1.115 nisimura  * other runnable LWPs are on the queue indexed by l_priority / PPQ (i.e.
  1153 1.115 nisimura  * divided by 4), which shrinks the 0-127 range of priorities into the 32
  1154 1.115 nisimura  * available queues.
1155 1.130 nathanw */
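/*
 * For example, an LWP with l_priority 67 sits on sched_qs[67 / PPQ] ==
 * sched_qs[16], and bit 16 of sched_whichqs (RQMASK(16)) is set while
 * that queue is non-empty.
 */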
1156 1.146 matt #ifdef RQDEBUG
1157 1.146 matt static void
1158 1.146 matt checkrunqueue(int whichq, struct lwp *l)
1159 1.146 matt {
1160 1.146 matt const struct prochd * const rq = &sched_qs[whichq];
1161 1.146 matt struct lwp *l2;
1162 1.146 matt int found = 0;
1163 1.146 matt int die = 0;
1164 1.146 matt int empty = 1;
1165 1.164 christos for (l2 = rq->ph_link; l2 != (const void*) rq; l2 = l2->l_forw) {
1166 1.146 matt if (l2->l_stat != LSRUN) {
1167 1.146 matt printf("checkrunqueue[%d]: lwp %p state (%d) "
1168 1.146 matt " != LSRUN\n", whichq, l2, l2->l_stat);
1169 1.146 matt }
1170 1.146 matt if (l2->l_back->l_forw != l2) {
1171 1.146 matt printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
1172 1.146 matt "corrupt %p\n", whichq, l2, l2->l_back,
1173 1.146 matt l2->l_back->l_forw);
1174 1.146 matt die = 1;
1175 1.146 matt }
1176 1.146 matt if (l2->l_forw->l_back != l2) {
1177 1.146 matt printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
1178 1.146 matt "corrupt %p\n", whichq, l2, l2->l_forw,
1179 1.146 matt l2->l_forw->l_back);
1180 1.146 matt die = 1;
1181 1.146 matt }
1182 1.146 matt if (l2 == l)
1183 1.146 matt found = 1;
1184 1.146 matt empty = 0;
1185 1.146 matt }
1186 1.146 matt if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
1187 1.146 matt printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
1188 1.146 matt whichq, rq);
1189 1.146 matt die = 1;
1190 1.146 matt } else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
1191 1.146 matt printf("checkrunqueue[%d]: bit clear for non-empty "
1192 1.146 matt "run-queue %p\n", whichq, rq);
1193 1.146 matt die = 1;
1194 1.146 matt }
1195 1.146 matt if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
1196 1.146 matt printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
1197 1.146 matt whichq, l);
1198 1.146 matt die = 1;
1199 1.146 matt }
1200 1.146 matt if (l != NULL && empty) {
1201 1.146 matt printf("checkrunqueue[%d]: empty run-queue %p with "
1202 1.146 matt "active lwp %p\n", whichq, rq, l);
1203 1.146 matt die = 1;
1204 1.146 matt }
1205 1.146 matt if (l != NULL && !found) {
1206 1.146 matt printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
1207 1.146 matt whichq, l, rq);
1208 1.146 matt die = 1;
1209 1.146 matt }
1210 1.146 matt if (die)
1211 1.146 matt panic("checkrunqueue: inconsistency found");
1212 1.146 matt }
1213 1.146 matt #endif /* RQDEBUG */
1214 1.146 matt
1215 1.113 gmcgarry void
1216 1.122 thorpej setrunqueue(struct lwp *l)
1217 1.113 gmcgarry {
1218 1.113 gmcgarry struct prochd *rq;
1219 1.122 thorpej struct lwp *prev;
1220 1.152 yamt const int whichq = l->l_priority / PPQ;
1221 1.113 gmcgarry
1222 1.166.2.3 ad LOCK_ASSERT(lwp_locked(l, &sched_mutex));
1223 1.166.2.2 ad
1224 1.146 matt #ifdef RQDEBUG
1225 1.146 matt checkrunqueue(whichq, NULL);
1226 1.146 matt #endif
1227 1.113 gmcgarry #ifdef DIAGNOSTIC
1228 1.166.2.2 ad if (l->l_back != NULL || l->l_stat != LSRUN)
1229 1.113 gmcgarry panic("setrunqueue");
1230 1.113 gmcgarry #endif
1231 1.134 matt sched_whichqs |= RQMASK(whichq);
1232 1.113 gmcgarry rq = &sched_qs[whichq];
1233 1.113 gmcgarry prev = rq->ph_rlink;
1234 1.122 thorpej l->l_forw = (struct lwp *)rq;
1235 1.122 thorpej rq->ph_rlink = l;
1236 1.122 thorpej prev->l_forw = l;
1237 1.122 thorpej l->l_back = prev;
1238 1.146 matt #ifdef RQDEBUG
1239 1.146 matt checkrunqueue(whichq, l);
1240 1.146 matt #endif
1241 1.113 gmcgarry }
1242 1.113 gmcgarry
1243 1.166.2.20 ad /*
1244 1.166.2.20 ad * XXXSMP When LWP dispatch (cpu_switch()) is changed to use remrunqueue(),
1245 1.166.2.20 ad * drop of the effective priority level from kernel to user needs to be
1246 1.166.2.20 ad * moved here from userret(). The assignment in userret() is currently
1247 1.166.2.20 ad * done unlocked.
1248 1.166.2.20 ad */
1249 1.113 gmcgarry void
1250 1.122 thorpej remrunqueue(struct lwp *l)
1251 1.113 gmcgarry {
1252 1.122 thorpej struct lwp *prev, *next;
1253 1.152 yamt const int whichq = l->l_priority / PPQ;
1254 1.166.2.2 ad
1255 1.166.2.2 ad LOCK_ASSERT(lwp_locked(l, &sched_mutex));
1256 1.166.2.2 ad
1257 1.146 matt #ifdef RQDEBUG
1258 1.146 matt checkrunqueue(whichq, l);
1259 1.146 matt #endif
1260 1.166.2.2 ad
1261 1.166.2.2 ad #if defined(DIAGNOSTIC)
1262 1.166.2.2 ad if (((sched_whichqs & RQMASK(whichq)) == 0) || l->l_back == NULL) {
1263 1.166.2.2 ad /* Shouldn't happen - interrupts disabled. */
1264 1.146 matt panic("remrunqueue: bit %d not set", whichq);
1265 1.166.2.2 ad }
1266 1.113 gmcgarry #endif
1267 1.122 thorpej prev = l->l_back;
1268 1.122 thorpej l->l_back = NULL;
1269 1.122 thorpej next = l->l_forw;
1270 1.122 thorpej prev->l_forw = next;
1271 1.122 thorpej next->l_back = prev;
1272 1.113 gmcgarry if (prev == next)
1273 1.134 matt sched_whichqs &= ~RQMASK(whichq);
1274 1.146 matt #ifdef RQDEBUG
1275 1.146 matt checkrunqueue(whichq, NULL);
1276 1.146 matt #endif
1277 1.113 gmcgarry }
1278 1.113 gmcgarry
1279 1.134 matt #undef RQMASK
1280 1.134 matt #endif /* !defined(__HAVE_MD_RUNQUEUE) */
1281