/*	$NetBSD: sched_4bsd.c,v 1.1.6.9 2007/10/08 20:26:13 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.1.6.9 2007/10/08 20:26:13 ad Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

/*
 * Run queues.
 *
 * We maintain bitmasks of non-empty queues in order to speed up finding
 * the first runnable process.  Since there can be (by definition) few
 * real-time LWPs in the system, we maintain them on a linked list,
 * sorted by priority.
 */

#define PPB_SHIFT	5
#define PPB_MASK	31

#define NUM_Q		(NPRI_KERNEL + NPRI_USER)
#define NUM_PPB		(1 << PPB_SHIFT)
#define NUM_B		(NUM_Q / NUM_PPB)

typedef struct runqueue {
        TAILQ_HEAD(, lwp) rq_queue[NUM_Q];	/* user+kernel */
        TAILQ_HEAD(, lwp) rq_rt;		/* realtime */
        uint32_t rq_bitmap[NUM_B];		/* bitmap of queues */
        u_int rq_count;				/* total # jobs */
} runqueue_t;
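
/*
 * Illustrative example of the bitmap layout above (not a normative part
 * of the interface): priority q lives on rq_queue[q], and its occupancy
 * bit is the (q & PPB_MASK)'th bit counted from the most significant end
 * of rq_bitmap[q >> PPB_SHIFT].  With PPB_SHIFT == 5, an LWP at priority
 * 37 is recorded in rq_bitmap[1] as (0x80000000 >> 5) == 0x04000000;
 * once that queue drains, runqueue_dequeue() clears the same bit again.
 */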

static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

const int schedppq = 1;

/*
 * Force a context switch among equal-priority processes every 100ms.
 * Called from hardclock() every rrticks (== hz / 10) hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from the clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
        struct schedstate_percpu *spc = &ci->ci_schedstate;

        spc->spc_ticks = rrticks;

        if (!CURCPU_IDLE_P()) {
                if (spc->spc_flags & SPCF_SEENRR) {
                        /*
                         * The process has already been through a roundrobin
                         * without switching and may be hogging the CPU.
                         * Indicate that the process should yield.
                         */
                        spc->spc_flags |= SPCF_SHOULDYIELD;
                } else
                        spc->spc_flags |= SPCF_SEENRR;
        }
        cpu_need_resched(ci, 0);
}
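
/*
 * Worked example of the round-robin timing above (illustrative only,
 * assuming the common hz == 100 configuration): sched_setup() sets
 * rrticks = hz / 10 == 10, so sched_tick() runs every 10 hardclock
 * ticks, i.e. every 100ms.  The first tick that finds a non-idle LWP on
 * the CPU sets SPCF_SEENRR; if the same LWP is still running 100ms
 * later, SPCF_SHOULDYIELD is set and the next preemption point forces a
 * switch.  An LWP therefore runs for at most roughly 100-200ms before
 * equal-priority competitors get a turn.
 */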

#define NICE_WEIGHT	1		/* priorities per nice level */

#define ESTCPU_SHIFT	11
#define ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - 1) << ESTCPU_SHIFT)
#define ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
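
/*
 * A quick check of the table above (illustrative arithmetic only): for
 * loadav == 1 we have b == 2, so decay == 2/3, and the exact solution of
 * (2/3) ** power == 0.1 is power == ln(0.1) / ln(2/3) == -2.30 / -0.405
 * == 5.68, matching the first column.  For loadav == 2, b == 4 gives
 * ln(0.1) / ln(4/5) == -2.30 / -0.223 == 10.3.  Both are close to the
 * 5 * loadav target that the approximation promises.
 */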

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

        if (estcpu == 0) {
                return 0;
        }

#if !defined(_LP64)
        /* avoid 64bit arithmetics. */
#define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
        if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
                return estcpu * loadfac / (loadfac + FSCALE);
        }
#endif /* !defined(_LP64) */

        return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * Note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

        if ((n << FSHIFT) >= 7 * loadfac) {
                return 0;
        }

        while (estcpu != 0 && n > 1) {
                estcpu = decay_cpu(loadfac, estcpu);
                n--;
        }

        return estcpu;
}
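
/*
 * Worked example of the early exit above (illustrative only): loadfac is
 * a fixed-point quantity, so at a load average of 1.0 it is 2 * FSCALE
 * and 7 * loadfac corresponds to 14 seconds.  An LWP that has slept for
 * n >= 14 seconds therefore has its estimate zeroed outright rather than
 * decayed n times; shorter sleeps fall through to the loop, which applies
 * decay_cpu() once per elapsed second.
 */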

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct proc *p, int minslp)
{
        struct lwp *l;
        fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

        /*
         * If the process has slept the entire second,
         * stop recalculating its priority until it wakes up.
         */
        if (minslp <= 1) {
                p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);

                LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                        if ((l->l_flag & LW_IDLE) != 0)
                                continue;
                        lwp_lock(l);
                        if (l->l_slptime <= 1 && l->l_priority < PRI_KERNEL)
                                resetpriority(l);
                        lwp_unlock(l);
                }
        }
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
        struct proc *p = l->l_proc;
        fixpt_t loadfac;

        KASSERT(lwp_locked(l, NULL));
        KASSERT(l->l_slptime > 1);

        loadfac = loadfactor(averunnable.ldavg[0]);

        l->l_slptime--; /* the first time was done in sched_pstats */
        /* XXX NJWLWP */
        /* XXXSMP occasionally unlocked, should be per-LWP */
        p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
        resetpriority(l);
}

/*
 * The primitives that manipulate the run queues.  The per-queue bitmap
 * (rq_bitmap) records which queues have LWPs in them.  sched_enqueue()
 * puts LWPs onto queues, sched_dequeue() removes them from queues.
 */
#ifdef RQDEBUG
/*
 * Verify that the contents of queue `whichq' and its occupancy bit are
 * consistent, and that `l' (if non-NULL) is actually on that queue.
 */
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
        const uint32_t bit = 0x80000000 >> (whichq & PPB_MASK);
        const uint32_t word = rq->rq_bitmap[whichq >> PPB_SHIFT];
        struct lwp *l2;
        int found = 0;
        int die = 0;
        int empty = 1;

        TAILQ_FOREACH(l2, &rq->rq_queue[whichq], l_runq) {
                if (l2->l_stat != LSRUN) {
                        printf("runqueue_check[%d]: lwp %p state (%d) "
                            " != LSRUN\n", whichq, l2, l2->l_stat);
                }
                if (l2 == l)
                        found = 1;
                empty = 0;
        }
        if (empty && (word & bit) != 0) {
                printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
                    whichq, rq);
                die = 1;
        } else if (!empty && (word & bit) == 0) {
                printf("runqueue_check[%d]: bit clear for non-empty "
                    "run-queue %p\n", whichq, rq);
                die = 1;
        }
        if (l != NULL && (word & bit) == 0) {
                printf("runqueue_check[%d]: bit clear for active lwp %p\n",
                    whichq, l);
                die = 1;
        }
        if (l != NULL && empty) {
                printf("runqueue_check[%d]: empty run-queue %p with "
                    "active lwp %p\n", whichq, rq, l);
                die = 1;
        }
        if (l != NULL && !found) {
                printf("runqueue_check[%d]: lwp %p not in runqueue %p!",
                    whichq, l, rq);
                die = 1;
        }
        if (die)
                panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define runqueue_check(a, b, c)	/* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
        int i;

        for (i = 0; i < NUM_Q; i++)
                TAILQ_INIT(&rq->rq_queue[i]);
        for (i = 0; i < NUM_B; i++)
                rq->rq_bitmap[i] = 0;
        TAILQ_INIT(&rq->rq_rt);
        rq->rq_count = 0;
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
        pri_t pri;
        lwp_t *l2;

        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

        pri = lwp_eprio(l);
        rq->rq_count++;

        if (pri >= PRI_USER_RT) {
                TAILQ_FOREACH(l2, &rq->rq_rt, l_runq) {
                        if (lwp_eprio(l2) < pri) {
                                TAILQ_INSERT_BEFORE(l2, l, l_runq);
                                return;
                        }
                }
                TAILQ_INSERT_TAIL(&rq->rq_rt, l, l_runq);
                return;
        }

        runqueue_check(rq, pri, NULL);
        rq->rq_bitmap[pri >> PPB_SHIFT] |=
            (0x80000000 >> (pri & PPB_MASK));
        TAILQ_INSERT_TAIL(&rq->rq_queue[pri], l, l_runq);
        runqueue_check(rq, pri, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
        pri_t pri;

        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

        pri = lwp_eprio(l);
        rq->rq_count--;

        if (pri >= PRI_USER_RT) {
                TAILQ_REMOVE(&rq->rq_rt, l, l_runq);
                return;
        }

        runqueue_check(rq, pri, l);
        TAILQ_REMOVE(&rq->rq_queue[pri], l, l_runq);
        if (TAILQ_EMPTY(&rq->rq_queue[pri]))
                rq->rq_bitmap[pri >> PPB_SHIFT] &=
                    ~(0x80000000 >> (pri & PPB_MASK));
        runqueue_check(rq, pri, NULL);
}

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
        pri_t pri;
        int i;

        KASSERT(rq->rq_count != 0);

        if (!TAILQ_EMPTY(&rq->rq_rt))
                return TAILQ_FIRST(&rq->rq_rt);

        for (i = NUM_B - 1; i >= 0; i--) {
                if (rq->rq_bitmap[i] != 0) {
                        pri = (32 - ffs(rq->rq_bitmap[i])) + i * NUM_PPB;
                        return TAILQ_FIRST(&rq->rq_queue[pri]);
                }
        }

        panic("runqueue_nextlwp");
}
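
/*
 * Worked example of the bit scan above (illustrative only): because the
 * occupancy bit for priority q is 0x80000000 >> (q & PPB_MASK), a higher
 * priority within a word always sits at a less significant bit, which is
 * exactly the bit ffs() reports first.  Suppose rq_bitmap[1] has the
 * bits for priorities 37 (0x04000000) and 44 (0x00080000) set: ffs()
 * returns 20, so pri = (32 - 20) + 1 * NUM_PPB = 12 + 32 = 44, and the
 * higher-priority queue is served first.
 */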

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
        lwp_t *l;
        int i;

        TAILQ_FOREACH(l, &rq->rq_rt, l_runq) {
                (*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
                    l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
                    (int)l->l_priority, (int)l->l_usrpri);
        }

        for (i = NUM_Q - 1; i >= 0; i--) {
                TAILQ_FOREACH(l, &rq->rq_queue[i], l_runq) {
                        (*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
                            l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
                            (int)l->l_priority, (int)l->l_usrpri);
                }
        }
}
#endif /* defined(DDB) */

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit()
{

        runqueue_init(&global_queue);
        mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
        /* Initialize the lock pointer for lwp0 */
        lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
        runqueue_t *rq;

        ci->ci_schedstate.spc_mutex = &sched_mutex;
        rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
        runqueue_init(rq);
        ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup()
{

        rrticks = hz / 10;
}

void
sched_setrunnable(struct lwp *l)
{

        if (l->l_slptime > 1)
                updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
        struct schedstate_percpu *spc;
        runqueue_t *rq;

        spc = &curcpu()->ci_schedstate;
        rq = spc->spc_sched_info;

        if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
                return (global_queue.rq_count | rq->rq_count) != 0;
        return rq->rq_count != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

        chgp->p_nice = n;
        resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
        int newpriority;
        struct proc *p = l->l_proc;

        /* XXXSMP KASSERT(mutex_owned(&p->p_stmutex)); */
        KASSERT(lwp_locked(l, NULL));

        if ((l->l_flag & LW_SYSTEM) != 0)
                return;

        newpriority = PRI_KERNEL - 1 - (int)(p->p_estcpu >> ESTCPU_SHIFT) -
            NICE_WEIGHT * (p->p_nice - NZERO);
        newpriority = imax(newpriority, 0);
        lwp_changepri(l, newpriority);
}
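
/*
 * Illustrative example of the formula above (not normative): with
 * NICE_WEIGHT == 1 and the nice value left at NZERO, an LWP whose
 * p_estcpu has grown to (5 << ESTCPU_SHIFT) is assigned user priority
 * PRI_KERNEL - 1 - 5, i.e. five levels below the highest user priority
 * (PRI_KERNEL - 1); raising nice by 4 would subtract a further 4 levels.
 * Because ESTCPULIM() caps p_estcpu, the CPU-usage term can never push
 * the result down by more than NICE_WEIGHT * PRIO_MAX - 1 levels.
 */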

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
        struct lwp *l;

        KASSERT(mutex_owned(&p->p_stmutex));

        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                lwp_lock(l);
                resetpriority(l);
                lwp_unlock(l);
        }
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in resetpriority()
 * above) will compute a different value each time p_estcpu increases.  This
 * can cause a switch, but unless the computed priority actually changes, the
 * LWP stays on the same run queue.  The CPU usage estimator ramps up quite
 * quickly when the process is running (linearly), and decays away
 * exponentially, at a rate which is proportionally slower when the system is
 * busy.  The basic principle is that the system will 90% forget that the
 * process used a lot of CPU time in 5 * loadav seconds.  This causes the
 * system to favor processes which haven't run much recently, and to
 * round-robin among other processes.
 */
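
/*
 * Worked numbers for the ramp-up described above (illustrative only):
 * each call to sched_schedclock() adds (1 << ESTCPU_SHIFT) to p_estcpu,
 * i.e. one full priority level per schedclock tick of CPU use, until
 * ESTCPULIM() caps it at (NICE_WEIGHT * PRIO_MAX - 1) << ESTCPU_SHIFT,
 * 19 levels with the constants defined in this file.  Meanwhile
 * sched_pstats_hook() multiplies p_estcpu by loadfac / (loadfac + FSCALE)
 * once per second, e.g. by 2/3 at a load average of 1.0, so a saturated
 * estimate needs on the order of 5 * loadav seconds of idleness to lose
 * 90% of its value, as derived in the decay comment earlier.
 */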

void
sched_schedclock(struct lwp *l)
{
        struct proc *p = l->l_proc;

        KASSERT(!CURCPU_IDLE_P());
        mutex_spin_enter(&p->p_stmutex);
        p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
        lwp_lock(l);
        resetpriority(l);
        mutex_spin_exit(&p->p_stmutex);
        if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority < PRI_KERNEL)
                l->l_priority = l->l_usrpri;
        lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

        KASSERT(mutex_owned(&parent->p_smutex));

        child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
        child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
        fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        fixpt_t estcpu;

        /* XXX Only if parent != init?? */

        mutex_spin_enter(&parent->p_stmutex);
        estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
            sched_pstats_ticks - child->p_forktime);
        if (child->p_estcpu > estcpu)
                parent->p_estcpu =
                    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
        mutex_spin_exit(&parent->p_stmutex);
}
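
/*
 * Illustrative reading of the chargeback above (not normative): the
 * inherited portion of the child's estimate is first decayed over the
 * child's lifetime (sched_pstats_ticks - p_forktime seconds), and only
 * the excess the child accumulated on its own is handed back.  In the
 * simplest case of a child that exits within the same pstats tick it was
 * forked in, estcpu equals p_estcpu_inherited, so the parent absorbs
 * exactly child->p_estcpu minus what it passed down at fork time.
 */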

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

        if ((l->l_flag & LW_BOUND) != 0)
                runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
        else
                runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret().  The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

        if ((l->l_flag & LW_BOUND) != 0)
                runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
        else
                runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
        struct schedstate_percpu *spc;
        runqueue_t *rq;
        lwp_t *l1, *l2;

        spc = &curcpu()->ci_schedstate;

        /* For now, just pick the highest priority LWP. */
        rq = spc->spc_sched_info;
        l1 = NULL;
        if (rq->rq_count != 0)
                l1 = runqueue_nextlwp(rq);

        rq = &global_queue;
        if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0) ||
            rq->rq_count == 0)
                return l1;
        l2 = runqueue_nextlwp(rq);

        if (l1 == NULL)
                return l2;
        if (l2 == NULL)
                return l1;
        if (lwp_eprio(l2) > lwp_eprio(l1))
                return l2;
        else
                return l1;
}

void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

/*
 * sysctl setup.  XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
        const struct sysctlnode *node = NULL;

        sysctl_createv(clog, 0, NULL, NULL,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "kern", NULL,
                NULL, 0, NULL, 0,
                CTL_KERN, CTL_EOL);
        sysctl_createv(clog, 0, NULL, &node,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "sched",
                SYSCTL_DESCR("Scheduler options"),
                NULL, 0, NULL, 0,
                CTL_KERN, CTL_CREATE, CTL_EOL);

        KASSERT(node != NULL);

        sysctl_createv(clog, 0, &node, NULL,
                CTLFLAG_PERMANENT,
                CTLTYPE_STRING, "name", NULL,
                NULL, 0, __UNCONST("4.4BSD"), 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &node, NULL,
                CTLFLAG_READWRITE,
                CTLTYPE_INT, "timesoftints",
                SYSCTL_DESCR("Track CPU time for soft interrupts"),
                NULL, 0, &softint_timing, 0,
                CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

        runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */