/* $NetBSD: sched_4bsd.c,v 1.7 2007/10/10 21:24:53 rmind Exp $ */

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *        The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.7 2007/10/10 21:24:53 rmind Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/kauth.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>
/*
 * Run queues.
 *
 * We have 32 run queues in descending priority of 0..31. We maintain
 * a bitmask of non-empty queues in order to speed up finding the first
 * runnable process. The bitmask is searched with ffs(), so the most
 * efficient machine-dependent instruction available can be used to find
 * the first non-empty queue.
 */

#define RUNQUE_NQS 32 /* number of runqueues */
#define PPQ (128 / RUNQUE_NQS) /* priorities per queue */

typedef struct subqueue {
        TAILQ_HEAD(, lwp) sq_queue;
} subqueue_t;
typedef struct runqueue {
        subqueue_t rq_subqueues[RUNQUE_NQS]; /* run queues */
        uint32_t rq_bitmap; /* bitmap of non-empty queues */
} runqueue_t;
static runqueue_t global_queue;

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);
static void resetprocpriority(struct proc *);

fixpt_t decay_cpu(fixpt_t, fixpt_t);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* The global scheduler state */
kmutex_t sched_mutex;

/* Number of hardclock ticks per sched_tick() */
int rrticks;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
        struct schedstate_percpu *spc = &ci->ci_schedstate;

        spc->spc_ticks = rrticks;

        if (CURCPU_IDLE_P())
                return;

        if (spc->spc_flags & SPCF_SEENRR) {
                /*
                 * The process has already been through a roundrobin
                 * without switching and may be hogging the CPU.
                 * Indicate that the process should yield.
                 */
                spc->spc_flags |= SPCF_SHOULDYIELD;
        } else
                spc->spc_flags |= SPCF_SEENRR;

        cpu_need_resched(ci, 0);
}

#define NICE_WEIGHT 2 /* priorities per nice level */

#define ESTCPU_SHIFT 11
#define ESTCPU_MAX ((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define ESTCPULIM(e) min((e), ESTCPU_MAX)
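
/*
 * For illustration only: assuming the traditional PRIO_MAX of 20 from
 * <sys/resource.h>, NICE_WEIGHT == 2 and PPQ == 128 / 32 == 4 give
 *
 *        ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11 == 73728
 *
 * so p_estcpu saturates at 36 "priority points" worth of history, well
 * below the (255 << ESTCPU_SHIFT) figure mentioned further down.
 */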

/*
 * Constants for digital decay and forget:
 *        90% of (p_estcpu) usage in 5 * loadav time
 *        95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *            Note that, as ps(1) mentions, this can let percentages
 *            total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *        for (i = 0; i < (5 * loadavg); i++)
 *                p_estcpu *= decay;
 * will compute
 *        p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *        decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *        decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *        decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *        b = 2 * loadavg
 * then
 *        decay = b / (b + 1)
 *
 * We now need to prove two things:
 *        1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *        2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *        For x close to zero, exp(x) =~ 1 + x, since
 *                exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *                therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *        For x close to zero, ln(1+x) =~ x, since
 *                ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *                therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *        ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *        solving for factor,
 *        ln(factor) =~ (-2.30/5*loadav), or
 *        factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *            exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *        solving for power,
 *        power*ln(b/(b+1)) =~ -2.30, or
 *        power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *        loadav: 1       2       3       4
 *        power:  5.68    10.32   14.94   19.55
 */
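
/*
 * A quick sanity check of the numbers above (not used by the code):
 * with a load average of 1, loadfactor(1) == 2 and
 *
 *        decay == 2 / (2 + 1) =~ 0.667
 *
 * and (2/3)**5.68 =~ 0.1, matching the "power: 5.68" entry in the table.
 * In the fixed-point code below this is computed as
 * estcpu * loadfac / (loadfac + FSCALE) with loadfac == 2 * FSCALE.
 */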

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define loadfactor(loadav) (2 * (loadav))

fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

        if (estcpu == 0) {
                return 0;
        }

#if !defined(_LP64)
        /* avoid 64bit arithmetics. */
#define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
        if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
                return estcpu * loadfac / (loadfac + FSCALE);
        }
#endif /* !defined(_LP64) */

        return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

        if ((n << FSHIFT) >= 7 * loadfac) {
                return 0;
        }

        while (estcpu != 0 && n > 1) {
                estcpu = decay_cpu(loadfac, estcpu);
                n--;
        }

        return estcpu;
}

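/*
 * For example (illustrative only): with a load average of 1,
 * loadfac == 2 * FSCALE, so 7 * loadfac == 14 << FSHIFT, and an LWP that
 * has slept for 14 or more sched_pstats() intervals (roughly seconds) has
 * its p_estcpu zeroed outright by the shortcut above rather than being
 * decayed iteratively.
 */
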
/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l)
{

        if (l->l_slptime <= 1 && l->l_priority >= PUSER)
                resetpriority(l);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
        struct proc *p = l->l_proc;
        fixpt_t loadfac;

        KASSERT(lwp_locked(l, NULL));
        KASSERT(l->l_slptime > 1);

        loadfac = loadfactor(averunnable.ldavg[0]);

        l->l_slptime--; /* the first time was done in sched_pstats */
        /* XXX NJWLWP */
        /* XXXSMP occasionally unlocked, should be per-LWP */
        p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
        resetpriority(l);
}

/*
 * On some architectures, it's faster to use an MSB ordering for the
 * priorities than the traditional LSB ordering.
 */
#define RQMASK(n) (0x00000001 << (n))

/*
 * The primitives that manipulate the run queues. rq_bitmap tells which
 * of the 32 subqueues have LWPs in them. sched_enqueue() puts LWPs into
 * queues, sched_dequeue() removes them from queues. The running LWP is
 * on no queue; other LWPs are on a queue indexed by lwp_eprio() divided
 * by PPQ, which shrinks the 0-127 range of priorities into the 32
 * available queues.
 */
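/*
 * For example: with PPQ == 4, effective priorities 0-3 share subqueue 0,
 * 4-7 share subqueue 1, and so on up to 124-127 in subqueue 31, so a
 * priority change only moves an LWP to a different queue when it crosses
 * a PPQ boundary.
 */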
#ifdef RQDEBUG
static void
runqueue_check(const runqueue_t *rq, int whichq, struct lwp *l)
{
        const subqueue_t * const sq = &rq->rq_subqueues[whichq];
        const uint32_t bitmap = rq->rq_bitmap;
        struct lwp *l2;
        int found = 0;
        int die = 0;
        int empty = 1;

        TAILQ_FOREACH(l2, &sq->sq_queue, l_runq) {
                if (l2->l_stat != LSRUN) {
                        printf("runqueue_check[%d]: lwp %p state (%d) "
                            " != LSRUN\n", whichq, l2, l2->l_stat);
                }
                if (l2 == l)
                        found = 1;
                empty = 0;
        }
        if (empty && (bitmap & RQMASK(whichq)) != 0) {
                printf("runqueue_check[%d]: bit set for empty run-queue %p\n",
                    whichq, rq);
                die = 1;
        } else if (!empty && (bitmap & RQMASK(whichq)) == 0) {
                printf("runqueue_check[%d]: bit clear for non-empty "
                    "run-queue %p\n", whichq, rq);
                die = 1;
        }
        if (l != NULL && (bitmap & RQMASK(whichq)) == 0) {
                printf("runqueue_check[%d]: bit clear for active lwp %p\n",
                    whichq, l);
                die = 1;
        }
        if (l != NULL && empty) {
                printf("runqueue_check[%d]: empty run-queue %p with "
                    "active lwp %p\n", whichq, rq, l);
                die = 1;
        }
        if (l != NULL && !found) {
                printf("runqueue_check[%d]: lwp %p not in runqueue %p!\n",
                    whichq, l, rq);
                die = 1;
        }
        if (die)
                panic("runqueue_check: inconsistency found");
}
#else /* RQDEBUG */
#define runqueue_check(a, b, c) /* nothing */
#endif /* RQDEBUG */

static void
runqueue_init(runqueue_t *rq)
{
        int i;

        for (i = 0; i < RUNQUE_NQS; i++)
                TAILQ_INIT(&rq->rq_subqueues[i].sq_queue);
}

static void
runqueue_enqueue(runqueue_t *rq, struct lwp *l)
{
        subqueue_t *sq;
        const int whichq = lwp_eprio(l) / PPQ;

        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

        runqueue_check(rq, whichq, NULL);
        rq->rq_bitmap |= RQMASK(whichq);
        sq = &rq->rq_subqueues[whichq];
        TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_runq);
        runqueue_check(rq, whichq, l);
}

static void
runqueue_dequeue(runqueue_t *rq, struct lwp *l)
{
        subqueue_t *sq;
        const int whichq = lwp_eprio(l) / PPQ;

        KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

        runqueue_check(rq, whichq, l);
        KASSERT((rq->rq_bitmap & RQMASK(whichq)) != 0);
        sq = &rq->rq_subqueues[whichq];
        TAILQ_REMOVE(&sq->sq_queue, l, l_runq);
        if (TAILQ_EMPTY(&sq->sq_queue))
                rq->rq_bitmap &= ~RQMASK(whichq);
        runqueue_check(rq, whichq, NULL);
}

static struct lwp *
runqueue_nextlwp(runqueue_t *rq)
{
        const uint32_t bitmap = rq->rq_bitmap;
        int whichq;

        if (bitmap == 0) {
                return NULL;
        }
        whichq = ffs(bitmap) - 1;
        return TAILQ_FIRST(&rq->rq_subqueues[whichq].sq_queue);
}

#if defined(DDB)
static void
runqueue_print(const runqueue_t *rq, void (*pr)(const char *, ...))
{
        const uint32_t bitmap = rq->rq_bitmap;
        struct lwp *l;
        int i, first;

        for (i = 0; i < RUNQUE_NQS; i++) {
                const subqueue_t *sq;
                first = 1;
                sq = &rq->rq_subqueues[i];
                TAILQ_FOREACH(l, &sq->sq_queue, l_runq) {
                        if (first) {
                                (*pr)("%c%d",
                                    (bitmap & RQMASK(i)) ? ' ' : '!', i);
                                first = 0;
                        }
                        (*pr)("\t%d.%d (%s) pri=%d usrpri=%d\n",
                            l->l_proc->p_pid,
                            l->l_lid, l->l_proc->p_comm,
                            (int)l->l_priority, (int)l->l_usrpri);
                }
        }
}
#endif /* defined(DDB) */
#undef RQMASK

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
sched_rqinit()
{

        runqueue_init(&global_queue);
        mutex_init(&sched_mutex, MUTEX_SPIN, IPL_SCHED);
        /* Initialize the lock pointer for lwp0 */
        lwp0.l_mutex = &curcpu()->ci_schedstate.spc_lwplock;
}

void
sched_cpuattach(struct cpu_info *ci)
{
        runqueue_t *rq;

        ci->ci_schedstate.spc_mutex = &sched_mutex;
        rq = kmem_zalloc(sizeof(*rq), KM_NOSLEEP);
        runqueue_init(rq);
        ci->ci_schedstate.spc_sched_info = rq;
}

void
sched_setup()
{

        rrticks = hz / 10;
}
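
/*
 * For example: on a system with the common hz value of 100, rrticks is 10,
 * so sched_tick() asks for a reschedule after 10 hardclock ticks, i.e. the
 * 100ms round-robin interval described above sched_tick().
 */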

void
sched_setrunnable(struct lwp *l)
{

        if (l->l_slptime > 1)
                updatepri(l);
}

bool
sched_curcpu_runnable_p(void)
{
        struct schedstate_percpu *spc;
        runqueue_t *rq;

        spc = &curcpu()->ci_schedstate;
        rq = spc->spc_sched_info;

        if (__predict_true((spc->spc_flags & SPCF_OFFLINE) == 0))
                return (global_queue.rq_bitmap | rq->rq_bitmap) != 0;
        return rq->rq_bitmap != 0;
}

void
sched_nice(struct proc *chgp, int n)
{

        chgp->p_nice = n;
        (void)resetprocpriority(chgp);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct lwp *l)
{
        unsigned int newpriority;
        struct proc *p = l->l_proc;

        /* XXXSMP LOCK_ASSERT(mutex_owned(&p->p_stmutex)); */
        LOCK_ASSERT(lwp_locked(l, NULL));

        if ((l->l_flag & LW_SYSTEM) != 0)
                return;

        newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
            NICE_WEIGHT * (p->p_nice - NZERO);
        newpriority = min(newpriority, MAXPRI);
        lwp_changepri(l, newpriority);
}
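
/*
 * For example: each (1 << ESTCPU_SHIFT) of accumulated p_estcpu makes the
 * user priority one point worse, and each nice level above or below NZERO
 * shifts it by NICE_WEIGHT (2) points, with the result clamped to MAXPRI.
 */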

/*
 * Recompute priority for all LWPs in a process.
 */
static void
resetprocpriority(struct proc *p)
{
        struct lwp *l;

        KASSERT(mutex_owned(&p->p_stmutex));

        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                lwp_lock(l);
                resetpriority(l);
                lwp_unlock(l);
        }
}

/*
 * We adjust the priority of the current process. The priority of a process
 * gets worse as it accumulates CPU time. The CPU usage estimator (p_estcpu)
 * is increased here. The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change. The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy. The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds. This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
sched_schedclock(struct lwp *l)
{
        struct proc *p = l->l_proc;

        KASSERT(!CURCPU_IDLE_P());
        mutex_spin_enter(&p->p_stmutex);
        p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
        lwp_lock(l);
        resetpriority(l);
        mutex_spin_exit(&p->p_stmutex);
        if ((l->l_flag & LW_SYSTEM) == 0 && l->l_priority >= PUSER)
                l->l_priority = l->l_usrpri;
        lwp_unlock(l);
}

/*
 * sched_proc_fork:
 *
 * Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{

        KASSERT(mutex_owned(&parent->p_smutex));

        child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
        child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 * Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
        fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
        fixpt_t estcpu;

        /* XXX Only if parent != init?? */

        mutex_spin_enter(&parent->p_stmutex);
        estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
            sched_pstats_ticks - child->p_forktime);
        if (child->p_estcpu > estcpu)
                parent->p_estcpu =
                    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
        mutex_spin_exit(&parent->p_stmutex);
}

void
sched_enqueue(struct lwp *l, bool ctxswitch)
{

        if ((l->l_flag & LW_BOUND) != 0)
                runqueue_enqueue(l->l_cpu->ci_schedstate.spc_sched_info, l);
        else
                runqueue_enqueue(&global_queue, l);
}

/*
 * XXXSMP When LWP dispatch (cpu_switch()) is changed to use sched_dequeue(),
 * the drop of the effective priority level from kernel to user needs to be
 * moved here from userret(). The assignment in userret() is currently
 * done unlocked.
 */
void
sched_dequeue(struct lwp *l)
{

        if ((l->l_flag & LW_BOUND) != 0)
                runqueue_dequeue(l->l_cpu->ci_schedstate.spc_sched_info, l);
        else
                runqueue_dequeue(&global_queue, l);
}

struct lwp *
sched_nextlwp(void)
{
        struct schedstate_percpu *spc;
        lwp_t *l1, *l2;

        spc = &curcpu()->ci_schedstate;

        /* For now, just pick the highest priority LWP. */
        l1 = runqueue_nextlwp(spc->spc_sched_info);
        if (__predict_false((spc->spc_flags & SPCF_OFFLINE) != 0))
                return l1;
        l2 = runqueue_nextlwp(&global_queue);

        if (l1 == NULL)
                return l2;
        if (l2 == NULL)
                return l1;
        if (lwp_eprio(l2) < lwp_eprio(l1))
                return l2;
        else
                return l1;
}

/*
 * Dummy.
 */

struct cpu_info *
sched_takecpu(struct lwp *l)
{

        return l->l_cpu;
}

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l)
{

}

void
sched_lwp_exit(struct lwp *l)
{

}

/*
 * sysctl setup. XXX This should be split with kern_synch.c.
 */
SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
        const struct sysctlnode *node = NULL;

        sysctl_createv(clog, 0, NULL, NULL,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "kern", NULL,
                NULL, 0, NULL, 0,
                CTL_KERN, CTL_EOL);
        sysctl_createv(clog, 0, NULL, &node,
                CTLFLAG_PERMANENT,
                CTLTYPE_NODE, "sched",
                SYSCTL_DESCR("Scheduler options"),
                NULL, 0, NULL, 0,
                CTL_KERN, CTL_CREATE, CTL_EOL);

        KASSERT(node != NULL);

        sysctl_createv(clog, 0, &node, NULL,
                CTLFLAG_PERMANENT,
                CTLTYPE_STRING, "name", NULL,
                NULL, 0, __UNCONST("4.4BSD"), 0,
                CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, &node, NULL,
                CTLFLAG_READWRITE,
                CTLTYPE_INT, "timesoftints",
                SYSCTL_DESCR("Track CPU time for soft interrupts"),
                NULL, 0, &softint_timing, 0,
                CTL_CREATE, CTL_EOL);
}

#if defined(DDB)
void
sched_print_runqueue(void (*pr)(const char *, ...))
{

        runqueue_print(&global_queue, pr);
}
#endif /* defined(DDB) */
