/*	$NetBSD: sched_4bsd.c,v 1.1.6.12 2007/10/24 11:57:59 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.6.12 2007/10/24 11:57:59 yamt Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain bitmasks of non-empty queues in order to speed up finding
 * the first runnable process.  Since there can be (by definition) few
 * real time LWPs in the system, we maintain them on a linked list,
 * sorted by priority.
 */

#define	PPB_SHIFT	5
#define	PPB_MASK	31

#define	NUM_Q	(NPRI_KERNEL + NPRI_USER)
#define	NUM_PPB	(1 << PPB_SHIFT)
#define	NUM_B	(NUM_Q / NUM_PPB)

typedef struct runqueue {
	TAILQ_HEAD(, lwp) rq_queue[NUM_Q];	/* user+kernel */
	TAILQ_HEAD(, lwp) rq_rt;		/* realtime */
	uint32_t	rq_bitmap[NUM_B];	/* bitmap of queues */
	u_int		rq_count;		/* total # jobs */
} runqueue_t;
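
/*
 * Illustrative sketch only, not referenced below: how a priority maps
 * onto the bitmap.  Priority `pri' lives in word pri >> PPB_SHIFT, at
 * bit 0x80000000 >> (pri & PPB_MASK).  Higher priorities therefore sit
 * at less significant bit positions, so ffs() on a word finds the
 * highest-priority non-empty queue within it.  These helper names are
 * hypothetical and exist purely for exposition.
 */
static inline u_int
runqueue_pri2word(pri_t pri)
{

	return pri >> PPB_SHIFT;		/* index into rq_bitmap[] */
}

static inline uint32_t
runqueue_pri2bit(pri_t pri)
{

	return 0x80000000 >> (pri & PPB_MASK);	/* bit within that word */
}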

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

fixpt_t	decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks;	/* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = 1;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P())
		return;

	if (spc->spc_flags & SPCF_SEENRR) {
		/*
		 * The process has already been through a roundrobin
		 * without switching and may be hogging the CPU.
		 * Indicate that the process should yield.
		 */
		spc->spc_flags |= SPCF_SHOULDYIELD;
	} else
		spc->spc_flags |= SPCF_SEENRR;

	cpu_need_resched(ci, 0);
}
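
/*
 * Example timeline (a sketch, assuming hz = 100 and thus rrticks = 10):
 * an LWP still on the CPU when sched_tick() fires gets SPCF_SEENRR; if
 * it is still there at the next sched_tick(), 100ms later,
 * SPCF_SHOULDYIELD is set, asking it to yield to competitors of equal
 * priority.
 */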

#define	NICE_WEIGHT	1		/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - 1) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...   -1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
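
/*
 * Worked example, a sketch only: with loadavg = 1 (so loadfac =
 * 2 * FSCALE), each call above multiplies estcpu by
 * 2*FSCALE / (2*FSCALE + FSCALE) = 2/3.  Since (2/3)**5.68 =~ 0.1,
 * about 5.7 such steps forget 90% of the usage, matching the power
 * table in the comment above.
 */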

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * (Each loadfactor's worth of sleep multiplies estcpu by roughly 1/e,
 * and exp(-7) < 1/255, hence seven loadfactors suffice.)
 *
 * Note that our ESTCPU_MAX is actually much smaller than
 * (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{

	if (l->l_slptime <= 1 && l->l_priority < PRI_KERNEL)
		resetpriority(l);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * The primitives that manipulate the run queues.  rq_bitmap tells which
 * of the queues have LWPs in them.  sched_enqueue() puts processes into
 * queues, sched_dequeue() removes them from queues.
 */
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, pri_t pri, struct lwp *l)
{
	const uint32_t bit = 0x80000000 >> (pri & PPB_MASK);
	const uint32_t bitmap = rq->rq_bitmap[pri >> PPB_SHIFT];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	TAILQ_FOREACH(l2, &rq->rq_queue[pri], l_runq) {
		if (l2->l_stat != LSRUN) {
			printf("runqueue_check[%d]: lwp %p state (%d) "
			    " != LSRUN\n", (int)pri, l2, l2->l_stat);
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (bitmap & bit) != 0) {
		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
		    (int)pri, rq);
		die = 1;
	} else if (!empty && (bitmap & bit) == 0) {
		printf("runqueue_check[%d]: bit clear for non-empty "
		    "run-queue %p\n", (int)pri, rq);
		die = 1;
	}
	if (l != NULL && (bitmap & bit) == 0) {
		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
		    (int)pri, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("runqueue_check[%d]: empty run-queue %p with "
		    "active lwp %p\n", (int)pri, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("runqueue_check[%d]: lwp %p not in runqueue %p!",
		    (int)pri, l, rq);
		die = 1;
	}
	if (die)
		panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define	runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < NUM_Q; i++)
		TAILQ_INIT(&rq->rq_queue[i]);
	for (i = 0; i < NUM_B; i++)
		rq->rq_bitmap[i] = 0;
	TAILQ_INIT(&rq->rq_rt);
	rq->rq_count = 0;
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;
	lwp_t *l2;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count++;

	if (pri >= PRI_USER_RT) {
		/* Real-time LWPs: insertion sort onto the sorted RT list. */
		TAILQ_FOREACH(l2, &rq->rq_rt, l_runq) {
			if (lwp_eprio(l2) < pri) {
				TAILQ_INSERT_BEFORE(l2, l, l_runq);
				return;
			}
		}
		TAILQ_INSERT_TAIL(&rq->rq_rt, l, l_runq);
		return;
	}

	/* Time-sharing LWPs: tail of the per-priority queue. */
	runqueue_check(rq, pri, NULL);
	rq->rq_bitmap[pri >> PPB_SHIFT] |=
	    (0x80000000 >> (pri & PPB_MASK));
	TAILQ_INSERT_TAIL(&rq->rq_queue[pri], l, l_runq);
	runqueue_check(rq, pri, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	pri_t pri;

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	pri = lwp_eprio(l);
	rq->rq_count--;

	if (pri >= PRI_USER_RT) {
		TAILQ_REMOVE(&rq->rq_rt, l, l_runq);
		return;
	}

	runqueue_check(rq, pri, l);
	TAILQ_REMOVE(&rq->rq_queue[pri], l, l_runq);
	/* Clear the bitmap bit if this was the last LWP at this priority. */
	if (TAILQ_EMPTY(&rq->rq_queue[pri]))
		rq->rq_bitmap[pri >> PPB_SHIFT] &=
		    ~(0x80000000 >> (pri & PPB_MASK));
	runqueue_check(rq, pri, NULL);
}

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	pri_t pri;
	int i;

	KASSERT(rq->rq_count != 0);

	/* Real-time LWPs take precedence; their list is pre-sorted. */
	if (!TAILQ_EMPTY(&rq->rq_rt))
		return TAILQ_FIRST(&rq->rq_rt);

	/*
	 * Scan the bitmap words from the high-priority end.  Higher
	 * priorities occupy less significant bits, so ffs() yields the
	 * highest-priority non-empty queue within a word: a return
	 * value of p maps to priority (32 - p) within that word.
	 */
	for (i = NUM_B - 1; i >= 0; i--) {
		if (rq->rq_bitmap[i] != 0) {
			pri = (32 - ffs(rq->rq_bitmap[i])) + i * NUM_PPB;
			return TAILQ_FIRST(&rq->rq_queue[pri]);
		}
	}

	panic("runqueue_nextlwp");
}
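
/*
 * Concrete example (a sketch): if rq_bitmap[1] == 0x00000005, bits 0
 * and 2 are set, ffs() returns 1, and the scan above computes
 * pri = (32 - 1) + 1 * 32 = 63, the highest-priority non-empty queue
 * in that word.
 */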

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	lwp_t *l;
	int i;

	TAILQ_FOREACH(l, &rq->rq_rt, l_runq) {
		(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
		    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
		    (int)l->l_priority, (int)l->l_usrpri);
	}

	for (i = NUM_Q - 1; i >= 0; i--) {
		TAILQ_FOREACH(l, &rq->rq_queue[i], l_runq) {
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;
	runqueue_t *rq;

	ci = curcpu();
	spc = &ci->ci_schedstate;
	rq = spc->spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints != 0)
		return true;
#endif
	/* An offline CPU may only run the LWPs bound to it. */
	if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
		return (global_queue.rq_count | rq->rq_count) != 0;
	return rq->rq_count != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP KASSERT(mutex_owned(&p->p_stmutex)); */
	KASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PRI_KERNEL - 1 - (p->p_estcpu >> ESTCPU_SHIFT) -
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = imax(newpriority, 0);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in resetpriority()
 * above) will compute a different value each time p_estcpu increases.  This
 * can cause a switch, but unless the computed priority actually changes, the
 * LWP's queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
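
/*
 * Concretely (a sketch): each sched_schedclock() call below adds
 * 1 << ESTCPU_SHIFT to p_estcpu, and resetpriority() subtracts
 * p_estcpu >> ESTCPU_SHIFT from the base priority, so every schedclock
 * tick spent running costs the process one step of user priority,
 * until ESTCPULIM() caps the estimator.
 */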

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority < PRI_KERNEL)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	KASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	struct schedstate_percpu *spc;
	runqueue_t *rq;
	lwp_t *l1, *l2;

	spc = &curcpu()->ci_schedstate;

	/* For now, just pick the highest priority LWP. */
	rq = spc->spc_sched_info;
	l1 = NULL;
	if (rq->rq_count != 0)
		l1 = runqueue_nextlwp(rq);

	rq = &global_queue;
	if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0) ||
	    rq->rq_count == 0)
		return l1;
	l2 = runqueue_nextlwp(rq);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) > lwp_eprio(l1))
		return l2;
	else
		return l1;
}

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}
755 1.1.6.1 ad
756 1.1.6.5 ad /*
757 1.1.6.5 ad * sysctl setup. XXX This should be split with kern_synch.c.
758 1.1.6.5 ad */
759 1.1.6.1 ad SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
760 1.1.6.1 ad {
761 1.1.6.1 ad const struct sysctlnode *node = NULL;
762 1.1.6.1 ad
763 1.1.6.1 ad sysctl_createv(clog, 0, NULL, NULL,
764 1.1.6.1 ad CTLFLAG_PERMANENT,
765 1.1.6.1 ad CTLTYPE_NODE, "kern", NULL,
766 1.1.6.1 ad NULL, 0, NULL, 0,
767 1.1.6.1 ad CTL_KERN, CTL_EOL);
768 1.1.6.1 ad sysctl_createv(clog, 0, NULL, &node,
769 1.1.6.1 ad CTLFLAG_PERMANENT,
770 1.1.6.1 ad CTLTYPE_NODE, "sched",
771 1.1.6.1 ad SYSCTL_DESCR("Scheduler options"),
772 1.1.6.1 ad NULL, 0, NULL, 0,
773 1.1.6.1 ad CTL_KERN, CTL_CREATE, CTL_EOL);
774 1.1.6.1 ad
775 1.1.6.5 ad KASSERT(node != NULL);
776 1.1.6.5 ad
777 1.1.6.5 ad sysctl_createv(clog, 0, &node, NULL,
778 1.1.6.5 ad CTLFLAG_PERMANENT,
779 1.1.6.5 ad CTLTYPE_STRING, "name", NULL,
780 1.1.6.5 ad NULL, 0, __UNCONST("4.4BSD"), 0,
781 1.1.6.5 ad CTL_CREATE, CTL_EOL);
782 1.1.6.5 ad sysctl_createv(clog, 0, &node, NULL,
783 1.1.6.5 ad CTLFLAG_READWRITE,
784 1.1.6.5 ad CTLTYPE_INT, "timesoftints",
785 1.1.6.5 ad SYSCTL_DESCR("Track CPU time for soft interrupts"),
786 1.1.6.5 ad NULL, 0, &softint_timing, 0,
787 1.1.6.5 ad CTL_CREATE, CTL_EOL);
788 1.1.6.1 ad }

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */