/*	$NetBSD: sched_4bsd.c,v 1.1.6.5 2007/07/14 22:09:48 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.6.5 2007/07/14 22:09:48 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain a bitmask of non-empty queues in order to speed up finding
 * the first runnable process.
 */

#define	PPQ		4			/* priorities per queue */
#define	RUNQUE_NQS	(PRI_COUNT / PPQ)	/* number of runqueues */

typedef struct subqueue {
	TAILQ_HEAD(, lwp) sq_queue;
} subqueue_t;

typedef struct runqueue {
	subqueue_t rq_subqueues[RUNQUE_NQS];	/* run queues */
	uint64_t rq_bitmap;			/* bitmap of non-empty queues */
} runqueue_t;

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

extern unsigned int sched_pstats_ticks;	/* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = PPQ;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_ticks = rrticks;

	if (!CURCPU_IDLE_P()) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	cpu_need_resched(curcpu(), 0);
}

#define	NICE_WEIGHT	1	/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
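
/*
 * Illustrative note (added, not from the original file): assuming the usual
 * PRIO_MAX of 20 from <sys/resource.h>, with NICE_WEIGHT == 1 and PPQ == 4
 * the ESTCPU_MAX above works out to (16 << ESTCPU_SHIFT), i.e. 16 priority
 * steps worth of estimated CPU usage.
 */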

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1;
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	    solving for factor,
 *	    ln(factor) =~ (-2.30/5*loadav), or
 *	    factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *		exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	    solving for power,
 *	    power*ln(b/(b+1)) =~ -2.30, or
 *	    power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
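
/*
 * Worked example (added for illustration, not from the original sources):
 * with a load average of 1, b = 2 and decay = 2/3.  Solving
 * (2/3) ** power = 0.1 gives power = ln(0.1) / ln(2/3) =~ 5.68, which
 * matches the first entry in the table above.
 */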

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
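
/*
 * Illustrative check (added, not from the original sources): with a load
 * average of 1.0, loadfactor() corresponds to 2.0 in fixed point, so each
 * decay step multiplies p_estcpu by 2 / (2 + 1) = 2/3 and the early-return
 * test below requires n >= 14 seconds of sleep.  (2/3) ** 14 =~ 0.0035 and
 * 255 * 0.0035 < 1, consistent with the claim above.
 */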
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct proc *p, int minslp)
{
	struct lwp *l;
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	/*
	 * If the process has slept the entire second,
	 * stop recalculating its priority until it wakes up.
	 */
	if (minslp <= 1) {
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			if (l->l_slptime <= 1 && l->l_priority < PRI_KERNEL)
				resetpriority(l);
			lwp_unlock(l);
		}
	}
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--;	/* the first time was done in sched_pstats */
	/* XXX NJWLWP */
	/* XXXSMP occasionally unlocked, should be per-LWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#define	RQMASK(n)	(1ULL << (n))
#define	WHICHQ(p)	(RUNQUE_NQS - 1 - ((p) / PPQ))

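/*
 * Illustrative note (added, not from the original file): WHICHQ() maps a
 * higher numeric priority to a lower subqueue index; for example, with
 * PPQ == 4 the top four priorities all land in subqueue 0.  As a result,
 * runqueue_nextlwp() below can use ffs() on rq_bitmap and the lowest set
 * bit identifies the best non-empty queue.
 */
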
/*
 * The primitives that manipulate the run queues.  rq_bitmap tells which
 * of the queues have processes in them.  sched_enqueue() puts processes
 * into queues, sched_dequeue() removes them from queues.
 */
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
	const subqueue_t * const sq = &rq->rq_subqueues[whichq];
	const uint64_t bitmap = rq->rq_bitmap;	/* full 64-bit bitmap */
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
		if (l2->l_stat != LSRUN) {
			printf("runqueue_check[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (bitmap & RQMASK(whichq)) != 0) {
		printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
		printf("runqueue_check[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("runqueue_check[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("runqueue_check[%d]: lwp %p not in runqueue %p!",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define	runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = WHICHQ(lwp_eprio(l));
	const uint64_t rqmask = RQMASK(whichq);

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, NULL);
	rq->rq_bitmap |= rqmask;
	sq = &rq->rq_subqueues[whichq];
	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
	runqueue_check(rq, whichq, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
	subqueue_t *sq;
	const int whichq = WHICHQ(lwp_eprio(l));
	const uint64_t rqmask = RQMASK(whichq);

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	runqueue_check(rq, whichq, l);
	KASSERT((rq->rq_bitmap & rqmask) != 0);
	sq = &rq->rq_subqueues[whichq];
	TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
	if (TAILQ_EMPTY(&sq->sq_queue))
		rq->rq_bitmap &= ~rqmask;
	runqueue_check(rq, whichq, NULL);
}

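/*
 * runqueue_nextlwp:
 *
 * Descriptive comment added here for clarity: return the LWP at the head
 * of the highest priority non-empty subqueue, or NULL if the run queue is
 * empty.  ffs() only examines 32 bits, so the 64-bit bitmap is scanned in
 * two halves; the lowest set bit wins because of the MSB queue ordering
 * established by WHICHQ() above.
 */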
static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
	const uint64_t bitmap = rq->rq_bitmap;
	int whichq;

	if (bitmap == 0) {
		return NULL;
	}
	whichq = ffs((uint32_t)bitmap) - 1;
	if (whichq != -1)
		return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
	whichq = ffs((uint32_t)(bitmap >> 32)) - 1;
	return TAILQ_FIRST(&rq->rq_subqueues[whichq + 32].sq_queue);
}

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
	const uint64_t bitmap = rq->rq_bitmap;
	struct lwp *l;
	int i, first;

	for (i = 0; i < RUNQUE_NQS; i++) {
		const subqueue_t *sq;
		first = 1;
		sq = &rq->rq_subqueues[i];
		TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
			if (first) {
				(*pr)("%c%d",
				    (bitmap & RQMASK(i)) ? ' ' : '!', i);
				first = 0;
			}
			(*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
			    l->l_proc->p_pid,
			    l->l_lid, l->l_proc->p_comm,
			    (int)l->l_priority, (int)l->l_usrpri);
		}
	}
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

	runqueue_init(&global_queue);
	mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
	/* Initialize the lock pointer for lwp0 */
	lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *rq;

	ci->ci_schedstate.spc_mutex = &sched_mutex;
	rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
	runqueue_init(rq);
	ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

	rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
	runqueue_t *rq = curcpu()->ci_schedstate.spc_sched_info;

	return (global_queue.rq_bitmap | rq->rq_bitmap) != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

	chgp->p_nice = n;
	(void)resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	/* XXXSMP KASSERT(mutex_owned(&p->p_stmutex)); */
	KASSERT(lwp_locked(l, NULL));

	if ((l->l_flag & LW_SYSTEM) != 0)
		return;

	newpriority = PRI_KERNEL - 1 - (p->p_estcpu >> ESTCPU_SHIFT) -
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = max(newpriority, 0);
	lwp_changepri(l, newpriority);
}

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
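
/*
 * Worked example (added for illustration, not from the original sources):
 * each call to sched_schedclock() below adds (1 << ESTCPU_SHIFT) to
 * p_estcpu, which lowers the user priority computed in resetpriority() by
 * exactly one step.  With PPQ == 4, it takes roughly four such increments
 * (one PPQ band) before the LWP's run queue actually changes.
 */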

void
sched_schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;

	KASSERT(!CURCPU_IDLE_P());
	mutex_spin_enter(&p->p_stmutex);
	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	lwp_lock(l);
	resetpriority(l);
	mutex_spin_exit(&p->p_stmutex);
	if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority < PRI_KERNEL)
		l->l_priority = l->l_usrpri;
	lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

	KASSERT(mutex_owned(&parent->p_smutex));

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	mutex_spin_enter(&parent->p_stmutex);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu)
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	mutex_spin_exit(&parent->p_stmutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

	if ((l->l_flag & LW_BOUND) != 0)
		runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
	else
		runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
	lwp_t *l1, *l2;

	/* For now, just pick the highest priority LWP. */
	l1 = runqueue_nextlwp(curcpu()->ci_schedstate.spc_sched_info);
	l2 = runqueue_nextlwp(&global_queue);

	if (l1 == NULL)
		return l2;
	if (l2 == NULL)
		return l1;
	if (lwp_eprio(l2) > lwp_eprio(l1))
		return l2;
	else
		return l1;
}

void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "sched",
	    SYSCTL_DESCR("Scheduler options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	KASSERT(node != NULL);

	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "name", NULL,
	    NULL, 0, __UNCONST("4.4BSD"), 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_READWRITE,
	    CTLTYPE_INT, "timesoftints",
	    SYSCTL_DESCR("Track CPU time for soft interrupts"),
	    NULL, 0, &softint_timing, 0,
	    CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

	runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */