/*	$NetBSD: sched_4bsd.c,v 1.15 2008/04/02 17:40:15 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.15 2008/04/02 17:40:15 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain bitmasks of non-empty queues in order to speed up finding
 * the first runnable process.  Since there can be (by definition) few
 * real time LWPs in the system, we maintain them on a linked list,
 * sorted by priority.
 */

#define PPB_SHIFT	5
#define PPB_MASK	31

#define NUM_Q	(NPRI_KERNEL + NPRI_USER)
#define NUM_PPB	(1 << PPB_SHIFT)
#define NUM_B	(NUM_Q / NUM_PPB)

typedef struct runqueue {
        TAILQ_HEAD(, lwp) rq_fixedpri;		/* realtime, kthread */
        u_int rq_count;				/* total # jobs */
        uint32_t rq_bitmap[NUM_B];		/* bitmap of queues */
        TAILQ_HEAD(, lwp) rq_queue[NUM_Q];	/* user+kernel */
} runqueue_t;
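
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * each 32-bit word of rq_bitmap covers NUM_PPB == 32 priority queues,
 * with higher priorities mapped to lower-order bits.  For example, an
 * LWP at priority 42 lands in word 42 >> PPB_SHIFT == 1, at bit
 * 0x80000000U >> (42 & PPB_MASK), i.e.
 *
 *	rq->rq_bitmap[1] |= 0x80000000U >> 10;	(sets 0x00200000)
 *
 * This layout is what lets runqueue_nextlwp() recover the highest set
 * priority with ffs(), which scans from the least significant bit.
 */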

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

fixpt_t decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t runqueue_lock;

/* Number of hardclock ticks per sched_tick() */
static int rrticks;

const int schedppq = 1;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
        struct schedstate_percpu *spc = &ci->ci_schedstate;

        spc->spc_ticks = rrticks;

        if (CURCPU_IDLE_P()) {
                cpu_need_resched(ci, 0);
                return;
        }

        if (spc->spc_flags & SPCF_SEENRR) {
                /*
                 * The process has already been through a roundrobin
                 * without switching and may be hogging the CPU.
                 * Indicate that the process should yield.
                 */
                spc->spc_flags |= SPCF_SHOULDYIELD;
                cpu_need_resched(ci, 0);
        } else
                spc->spc_flags |= SPCF_SEENRR;
}

/*
 * Why PRIO_MAX - 2?  From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define ESTCPU_SHIFT	11
#define ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define ESTCPULIM(e)	min((e), ESTCPU_MAX)
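
/*
 * Worked example (editor's illustration, not in the original source):
 * with ESTCPU_SHIFT == 11, one priority level corresponds to 2048 estcpu
 * units, and ESTCPU_MAX == 18 << 11 == 36864; so accumulated CPU usage
 * can pull an LWP's priority down by at most the 18 levels mentioned in
 * the comment above, while nice keeps its full 40.
 */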

/*
 * Constants for digital decay and forget:
 *	90% of (l_estcpu) usage in 5 * loadav time
 *	95% of (l_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		l_estcpu *= decay;
 * will compute
 *	l_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...   -1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *	Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).  QED
 *
 * Proof of (2):
 *	Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav: 1       2       3       4
 *	power:  5.68    10.32   14.94   19.55
 */
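
/*
 * Numeric example (editor's illustration, not in the original source):
 * at loadavg == 2, b == 4 and decay == 4/5.  Applying the decay once per
 * second, estcpu reaches 10% of its starting value after
 * ln(0.1)/ln(0.8) =~ 10.3 seconds, matching the "power" row above and
 * close to the 5 * loadavg == 10 second design target.
 */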

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)	(2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

        if (estcpu == 0) {
                return 0;
        }

#if !defined(_LP64)
        /* avoid 64bit arithmetics. */
#define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
        if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
                return estcpu * loadfac / (loadfac + FSCALE);
        }
#endif /* !defined(_LP64) */

        return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
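
/*
 * Worked example (editor's sketch; assumes FSHIFT == 11 and hence
 * FSCALE == 2048, as in <sys/param.h>): a load average of 2.0 is the
 * fixed-point value 2 * FSCALE == 4096, so loadfac == 8192 and
 *
 *	decay_cpu(8192, 10000) == 10000 * 8192 / (8192 + 2048) == 8000
 *
 * i.e. one application of the 4/5 decay factor from the comment above.
 */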

/*
 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay l_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

        if ((n << FSHIFT) >= 7 * loadfac) {
                return 0;
        }

        while (estcpu != 0 && n > 1) {
                estcpu = decay_cpu(loadfac, estcpu);
                n--;
        }

        return estcpu;
}
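
/*
 * Illustrative note (editor's sketch, not in the original source): the
 * shortcut above compares in fixed point.  With a load average of 1.0,
 * loadfac == 2 * FSCALE, so (n << FSHIFT) >= 7 * loadfac reduces to
 * n >= 14: after 14 or more seconds asleep, estcpu is simply treated as
 * fully decayed instead of iterating decay_cpu() n times.
 */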

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{
        fixpt_t loadfac;
        int sleeptm;

        /*
         * If the LWP has slept an entire second, stop recalculating
         * its priority until it wakes up.
         */
        if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
            l->l_stat == LSSUSPENDED) {
                l->l_slptime++;
                sleeptm = 1;
        } else {
                sleeptm = 0x7fffffff;
        }

        if (l->l_slptime <= sleeptm) {
                loadfac = loadfactor(averunnable.ldavg[0]);
                l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
                resetpriority(l);
        }
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
        fixpt_t loadfac;

        KASSERT(lwp_locked(l, NULL));
        KASSERT(l->l_slptime > 1);

        loadfac = loadfactor(averunnable.ldavg[0]);

        l->l_slptime--; /* the first time was done in sched_pstats */
        l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
        resetpriority(l);
}

static void
runqueue_init(runqueue_t *rq)
{
        int i;

        for (i = 0; i < NUM_Q; i++)
                TAILQ_INIT(&rq->rq_queue[i]);
        for (i = 0; i < NUM_B; i++)
                rq->rq_bitmap[i] = 0;
        TAILQ_INIT(&rq->rq_fixedpri);
        rq->rq_count = 0;
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
        pri_t pri;
        lwp_t *l2;

        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

        pri = lwp_eprio(l);
        rq->rq_count++;

        if (pri >= PRI_KTHREAD) {
                TAILQ_FOREACH(l2, &rq->rq_fixedpri, l_runq) {
                        if (lwp_eprio(l2) < pri) {
                                TAILQ_INSERT_BEFORE(l2, l, l_runq);
                                return;
                        }
                }
                TAILQ_INSERT_TAIL(&rq->rq_fixedpri, l, l_runq);
                return;
        }

        rq->rq_bitmap[pri >> PPB_SHIFT] |=
            (0x80000000U >> (pri & PPB_MASK));
        TAILQ_INSERT_TAIL(&rq->rq_queue[pri], l, l_runq);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
        pri_t pri;

        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

        pri = lwp_eprio(l);
        rq->rq_count--;

        if (pri >= PRI_KTHREAD) {
                TAILQ_REMOVE(&rq->rq_fixedpri, l, l_runq);
                return;
        }

        TAILQ_REMOVE(&rq->rq_queue[pri], l, l_runq);
        if (TAILQ_EMPTY(&rq->rq_queue[pri]))
                rq->rq_bitmap[pri >> PPB_SHIFT] ^=
                    (0x80000000U >> (pri & PPB_MASK));
}

#if (NUM_B != 3) || (NUM_Q != 96)
#error adjust runqueue_nextlwp
#endif

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
        pri_t pri;

        KASSERT(rq->rq_count != 0);

        if (!TAILQ_EMPTY(&rq->rq_fixedpri))
                return TAILQ_FIRST(&rq->rq_fixedpri);

        if (rq->rq_bitmap[2] != 0)
                pri = 96 - ffs(rq->rq_bitmap[2]);
        else if (rq->rq_bitmap[1] != 0)
                pri = 64 - ffs(rq->rq_bitmap[1]);
        else
                pri = 32 - ffs(rq->rq_bitmap[0]);
        return TAILQ_FIRST(&rq->rq_queue[pri]);
}
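
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * because higher priorities occupy lower-order bits within each word,
 * ffs() finds the highest runnable priority.  E.g. if only priority 42
 * is runnable, rq_bitmap[1] == 0x00200000, ffs() returns 22, and
 * pri = 64 - 22 == 42.
 */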

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
        lwp_t *l;
        int i;

        (*pr)("PID\tLID\tPRI\tIPRI\tEPRI\tLWP\t\t NAME\n");

        TAILQ_FOREACH(l, &rq->rq_fixedpri, l_runq) {
                (*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
                    l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
                    (int)l->l_inheritedprio, lwp_eprio(l),
                    (long)l, l->l_proc->p_comm);
        }

        for (i = NUM_Q - 1; i >= 0; i--) {
                TAILQ_FOREACH(l, &rq->rq_queue[i], l_runq) {
                        (*pr)("%d\t%d\t%d\t%d\t%d\t%016lx %s\n",
                            l->l_proc->p_pid, l->l_lid, (int)l->l_priority,
                            (int)l->l_inheritedprio, lwp_eprio(l),
                            (long)l, l->l_proc->p_comm);
                }
        }

        (*pr)("CPUIDX\tRESCHED\tCURPRI\tFLAGS\n");
        for (CPU_INFO_FOREACH(cii, ci)) {
                (*pr)("%d\t%d\t%d\t%04x\n", (int)ci->ci_index,
                    (int)ci->ci_want_resched,
                    (int)ci->ci_schedstate.spc_curpriority,
                    (int)ci->ci_schedstate.spc_flags);
        }

        (*pr)("NEXTLWP\n%016lx\n", (long)sched_nextlwp());
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit(void)
{

        runqueue_init(&global_queue);
        mutex_init(&runqueue_lock, MUTEX_DEFAULT, IPL_SCHED);
}

void
sched_cpuattach(struct cpu_info *ci)
{
        runqueue_t *rq;

        if (lwp0.l_cpu == ci) {
                /* Initialize the lock pointer for lwp0 */
                lwp0.l_mutex = curcpu()->ci_schedstate.spc_lwplock;
        }

        ci->ci_schedstate.spc_mutex = &runqueue_lock;
        rq = kmem_zalloc(sizeof(*rq), KM_SLEEP);
        runqueue_init(rq);
        ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup(void)
{

        rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

        if (l->l_slptime > 1)
                updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
        struct schedstate_percpu *spc;
        struct cpu_info *ci;
        int bits;

        ci = curcpu();
        spc = &ci->ci_schedstate;
#ifndef __HAVE_FAST_SOFTINTS
        bits = ci->ci_data.cpu_softints;
        bits |= ((runqueue_t *)spc->spc_sched_info)->rq_count;
#else
        bits = ((runqueue_t *)spc->spc_sched_info)->rq_count;
#endif
        if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
                bits |= global_queue.rq_count;
        return bits != 0;
}

void
sched_nice(struct proc *p, int n)
{
        struct lwp *l;

        KASSERT(mutex_owned(&p->p_smutex));

        p->p_nice = n;
        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                lwp_lock(l);
                resetpriority(l);
                lwp_unlock(l);
        }
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
        pri_t pri;
        struct proc *p = l->l_proc;

        KASSERT(lwp_locked(l, NULL));

        if (l->l_class != SCHED_OTHER)
                return;

        /* See comments above ESTCPU_SHIFT definition. */
        pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
        pri = imax(pri, 0);
        if (pri != l->l_priority)
                lwp_changepri(l, pri);
}
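
/*
 * Worked example (editor's illustration, not in the original source;
 * assumes PRI_KERNEL == 64 and p_nice stored NZERO-based, default 20):
 * a freshly started LWP with l_estcpu == 0 gets
 *	pri = 63 - 0 - 20 == 43,
 * while a CPU hog at ESTCPU_MAX (18 << ESTCPU_SHIFT) drops to
 *	pri = 63 - 18 - 20 == 25.
 */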

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities (see
 * resetpriority() above) will compute a different value each time l_estcpu
 * increases.  This can cause a switch, but unless the priority crosses a PPQ
 * boundary the actual queue will not change.  The CPU usage estimator ramps
 * up quite quickly when the process is running (linearly), and decays away
 * exponentially, at a rate which is proportionally slower when the system is
 * busy.  The basic principle is that the system will 90% forget that the
 * process used a lot of CPU time in 5 * loadav seconds.  This causes the
 * system to favor processes which haven't run much recently, and to
 * round-robin among other processes.
 */
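
/*
 * Illustrative note (editor's sketch, not in the original source): each
 * sched_schedclock() call below adds ESTCPU_ACCUM == 1 << (ESTCPU_SHIFT - 1)
 * to l_estcpu, i.e. half a priority level, so a continuously running LWP
 * loses one priority level per two schedclock ticks until it reaches
 * ESTCPU_MAX or sleeps long enough for the decay to win.
 */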

void
sched_schedclock(struct lwp *l)
{

        if (l->l_class != SCHED_OTHER)
                return;

        KASSERT(!CURCPU_IDLE_P());
        l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
        lwp_lock(l);
        resetpriority(l);
        lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
        lwp_t *pl;

        KASSERT(mutex_owned(&parent->p_smutex));

        pl = LIST_FIRST(&parent->p_lwps);
        child->p_estcpu_inherited = pl->l_estcpu;
        child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
        fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        fixpt_t estcpu;
        lwp_t *pl, *cl;

        /* XXX Only if parent != init?? */

        mutex_enter(&parent->p_smutex);
        pl = LIST_FIRST(&parent->p_lwps);
        cl = LIST_FIRST(&child->p_lwps);
        estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
            sched_pstats_ticks - child->p_forktime);
        if (cl->l_estcpu > estcpu) {
                lwp_lock(pl);
                pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
                lwp_unlock(pl);
        }
        mutex_exit(&parent->p_smutex);
}
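
/*
 * Illustrative note (editor's sketch, not in the original source): the
 * chargeback credits the parent only with estcpu the child earned on its
 * own.  If a child forked with p_estcpu_inherited == 8192, which would
 * have decayed to, say, 4096 over the child's lifetime, and the child
 * exits with l_estcpu == 6144, the parent is charged 6144 - 4096 == 2048.
 */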

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

        if (__predict_false(l->l_target_cpu != NULL)) {
                /* Global mutex is used - just change the CPU */
                l->l_cpu = l->l_target_cpu;
                l->l_target_cpu = NULL;
        }

        if ((l->l_flag & LW_BOUND) != 0)
                runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
        else
                runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

        if ((l->l_flag & LW_BOUND) != 0)
                runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
        else
                runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
        struct schedstate_percpu *spc;
        runqueue_t *rq;
        lwp_t *l1, *l2;

        spc = &curcpu()->ci_schedstate;

        /* For now, just pick the highest priority LWP. */
        rq = spc->spc_sched_info;
        l1 = NULL;
        if (rq->rq_count != 0)
                l1 = runqueue_nextlwp(rq);

        rq = &global_queue;
        if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0) ||
            rq->rq_count == 0)
                return l1;
        l2 = runqueue_nextlwp(rq);

        if (l1 == NULL)
                return l2;
        if (l2 == NULL)
                return l1;
        if (lwp_eprio(l2) > lwp_eprio(l1))
                return l2;
        else
                return l1;
}

struct cpu_info *
sched_takecpu(struct lwp *l)
{

        return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

        l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_exit(struct lwp *l)
{

}

void
sched_lwp_collect(struct lwp *t)
{
        lwp_t *l;

        /* Absorb estcpu value of collected LWP. */
        l = curlwp;
        lwp_lock(l);
        l->l_estcpu += t->l_estcpu;
        lwp_unlock(l);
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        int rttsms = hztoms(rrticks);

        node = *rnode;
        node.sysctl_data = &rttsms;
        return sysctl_lookup(SYSCTLFN_CALL(&node));
}
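
/*
 * Usage example (editor's illustration, not in the original source):
 * with hz == 100, sched_setup() sets rrticks = 10, so hztoms(10)
 * reports a 100ms round-robin quantum:
 *
 *	$ sysctl kern.sched.rtts
 *	kern.sched.rtts = 100
 */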

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
        const struct sysctlnode *node = NULL;

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "kern", NULL,
            NULL, 0, NULL, 0,
            CTL_KERN, CTL_EOL);
        sysctl_createv(clog, 0, NULL, &node,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "sched",
            SYSCTL_DESCR("Scheduler options"),
            NULL, 0, NULL, 0,
            CTL_KERN, CTL_CREATE, CTL_EOL);

        KASSERT(node != NULL);

        sysctl_createv(clog, 0, &node, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRING, "name", NULL,
            NULL, 0, __UNCONST("4.4BSD"), 0,
            CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &node, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_INT, "rtts",
            SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
            sysctl_sched_rtts, 0, NULL, 0,
            CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &node, NULL,
            CTLFLAG_READWRITE,
            CTLTYPE_INT, "timesoftints",
            SYSCTL_DESCR("Track CPU time for soft interrupts"),
            NULL, 0, &softint_timing, 0,
            CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

        runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */