/*	$NetBSD: kern_synch.c,v 1.152 2005/10/30 20:28:56 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.152 2005/10/30 20:28:56 yamt Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * Sleep queues.
 *
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define	SLPQUE_TABLESIZE	128
#define	SLPQUE_LOOKUP(x)	(((u_long)(x) >> 8) & (SLPQUE_TABLESIZE - 1))

#define	SLPQUE(ident)	(&sched_slpque[SLPQUE_LOOKUP(ident)])
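
/*
 * For example (figures are illustrative only): a wait channel address of
 * 0xdeadbeef hashes to ((0xdeadbeef >> 8) & 127) == 0x3e, i.e. sleep
 * queue slot 62.
 */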

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
__volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

__inline void sa_awaken(struct lwp *);
__inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
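
/*
 * A worked example of the decay (figures are illustrative): with a load
 * average of 2.0, loadfactor() gives loadfac == 4 (in FSCALE units), so
 * decay_cpu() scales p_estcpu by 4/(4+1) == 0.8 each second.  After
 * 5 * loadavg == 10 seconds, 0.8**10 =~ 0.107, i.e. roughly 90% of the
 * accumulated usage has been forgotten, matching the analysis above.
 */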

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
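
/*
 * Sanity check for the shift above (illustrative): (1 - exp(-1/20)) / 100
 * =~ 0.048771 / 100 =~ 4.88e-4, and 2**-11 =~ 4.88e-4 as well, which is
 * why a plain shift by (FSHIFT - CCPU_SHIFT) can stand in for the multiply
 * in schedcpu()'s fast path when the clock runs at 100 Hz.
 */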

#define	PPQ		(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT	2			/* priorities per nice level */
#define	ESTCPULIM(e)	min((e), NICE_WEIGHT * PRIO_MAX - PPQ)
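
/*
 * Illustrative figures, assuming the usual RUNQUE_NQS of 32 and PRIO_MAX
 * of 20: PPQ is 128/32 == 4 priorities per run queue, and ESTCPULIM()
 * caps p_estcpu at 2 * 20 - 4 == 36 so that the priority computed by
 * resetpriority() stays within range.
 */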

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, minslp;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100)?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
		    100 * (((fixpt_t) p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		splx(s);	/* Done with the process CPU ticks update */
		SCHED_LOCK(s);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_slptime > 1)
				continue;
			resetpriority(l);
			if (l->l_priority >= PUSER) {
				if (l->l_stat == LSRUN &&
				    (l->l_flag & L_INMEM) &&
				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
					remrunqueue(l);
					l->l_priority = l->l_usrpri;
					setrunqueue(l);
				} else
					l->l_priority = l->l_usrpri;
			}
		}
		SCHED_UNLOCK(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
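/*
 * Concretely (illustrative): with a load average of 1, loadfac == 2, so
 * a sleep of more than 5 * loadfac == 10 seconds clears p_estcpu outright,
 * while shorter sleeps decay it by loadfac/(loadfac + 1) == 2/3 for
 * roughly each slept second accounted for below.
 */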
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	unsigned int newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if (l->l_slptime > 5 * loadfac)
		p->p_estcpu = 0; /* XXX NJWLWP */
	else {
		l->l_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --l->l_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler_slock is acquired.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
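/*
 * A sketch of the conventional calling pattern (hypothetical names; the
 * flag, lock and wait message below are made up for illustration):
 *
 *	simple_lock(&res_slock);
 *	while (res_busy) {
 *		error = ltsleep(&res_busy, PWAIT | PCATCH, "reswait",
 *		    0, &res_slock);
 *		if (error)
 *			break;	(the interlock is re-held on return,
 *				 since PNORELOCK was not passed)
 *	}
 *	simple_unlock(&res_slock);
 */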
int
ltsleep(__volatile const void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	struct slpque *qp;
	struct sadata_upcall *sau;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curlwp == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	/*
	 * XXX We need to allocate the sadata_upcall structure here,
	 * XXX since we can't sleep while waiting for memory inside
	 * XXX sa_upcall().  It would be nice if we could safely
	 * XXX allocate the sadata_upcall structure on the stack, here.
	 */
	if (l->l_flag & L_SA) {
		sau = sadata_upcall_alloc(0);
	} else {
		sau = NULL;
	}

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else {
		*qp->sq_tailp = l;
	}
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if (((sig = CURSIG(l)) != 0) ||
		    ((p->p_flag & P_WEXIT) && p->p_nlwps > 1)) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, sau, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#if	defined(DDB) && !defined(GPROF)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~(L_TIMEOUT|L_CANCELLED);
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);

	if (catch) {
		const int cancelled = l->l_flag & L_CANCELLED;
		l->l_flag &= ~L_CANCELLED;
		if (sig != 0 || (sig = CURSIG(l)) != 0 || cancelled) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			/*
			 * If this sleep was canceled, don't let the syscall
			 * restart.
			 */
			if (cancelled ||
			    (SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
				return (EINTR);
			return (ERESTART);
		}
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);

	/*
	 * XXXNJW: this is very much a kluge; revisit.  A better way of
	 * preventing looping/hanging syscalls like wait4() and _lwp_wait()
	 * from wedging an exiting process would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && p->p_nlwps > 1 && exiterr))
		return (EINTR);
	return (0);
}

/*
 * Implement the timeout for tsleep.
 * If the LWP hasn't been awakened (wchan non-zero), set the timeout
 * flag and undo the sleep.  If the LWP is stopped, just unsleep so it
 * will remain stopped.  The L_TIMEOUT flag set here is consumed by
 * ltsleep() on resume, where it becomes an EWOULDBLOCK return.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

__inline void
sa_awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_proc->p_sa)
		sa_awaken(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority on the last CPU on
	 * which it ran.
	 *
	 * XXXSMP See affinity comment in resched_proc().
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(__volatile const void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(__volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make runnable the highest-priority LWP sleeping on the specified
 * identifier.
 */
void
wakeup_one(__volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher than the highest priority SSTOP
	 * process.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.  For scheduler-activations
 * LWPs, sa_preempt() is called afterwards to report the preemption,
 * unless "more" is nonzero or no other LWP actually ran.
 */

void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r, s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nivcsw++;
	r = mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "newl" if non-NULL, otherwise let cpu_switch() choose
 * the next LWP.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
	int hold_count;
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	hold_count = KERNEL_LOCK_RELEASE_ALL();

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
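
	/*
	 * Note that bumping the soft limit by 5 above means SIGXCPU is
	 * re-delivered roughly every five seconds of accumulated CPU
	 * time until the hard limit finally produces SIGKILL.
	 */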
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p))
		pmc_save_context(p);
#endif

	/*
	 * Switch to the new current process.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (newl == NULL) {
		retval = cpu_switch(l, NULL);
	} else {
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p))
		pmc_restore_context(p);
#endif

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	KERNEL_LOCK_ACQUIRE_COUNT(hold_count);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}
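
/*
 * Each prochd is a circular doubly-linked list: an empty queue is one
 * whose head links point back at the queue header itself, which is what
 * rqinit() establishes above and what setrunqueue()/remrunqueue() below
 * preserve.
 */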

static __inline void
resched_proc(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
		/* FALLTHROUGH */
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}

	if (l->l_proc->p_sa)
		sa_awaken(l);

	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else
		resched_proc(l, l->l_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
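/*
 * A worked example (illustrative, assuming the usual PUSER of 50): a
 * process with p_estcpu == 8 and p_nice == NZERO + 4 yields
 * l_usrpri == 50 + 8 + NICE_WEIGHT * 4 == 66, and the min() below clips
 * the sum to MAXPRI whenever it would run past it.
 */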
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	resched_proc(l, l->l_usrpri);
}

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}

void
suspendsched()
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	LIST_FOREACH(l, &alllwp, l_list) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with LWPs running
			 * on other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	child->p_estcpu = parent->p_estcpu;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{

	/* XXX Only if parent != init?? */
	parent->p_estcpu = ESTCPULIM(parent->p_estcpu + child->p_estcpu);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  sched_whichqs tells which
 * of the 32 queues in sched_qs have LWPs in them.  setrunqueue() puts LWPs
 * into queues, remrunqueue() removes them from queues.  The running LWP is
 * on no queue; other LWPs are on a queue related to l->l_priority, divided
 * by PPQ (4) to shrink the 0-127 range of priorities into the 32 available
 * queues.
 */
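
/*
 * For instance (illustrative): an LWP at priority 57 lands on run queue
 * 57 / PPQ == 14, and RQMASK(14) selects the bit tested in sched_whichqs
 * for that queue (counted from the LSB, or from the MSB on
 * __HAVE_BIGENDIAN_BITOPS machines).
 */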

#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;
	for (l2 = rq->ph_link; l2 != (void*) rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
#ifdef DIAGNOSTIC
	if (((sched_whichqs & RQMASK(whichq)) == 0))
		panic("remrunqueue: bit %d not set", whichq);
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */