/*	$NetBSD: sched_4bsd.c,v 1.33 2017/07/14 13:23:48 maxv Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.33 2017/07/14 13:23:48 maxv Exp $");

#include "opt_ddb.h"
#include "opt_lockdebug.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/lockdebug.h>
#include <sys/intr.h>

static void updatepri(struct lwp *);
static void resetpriority(struct lwp *);

extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */

/* Number of hardclock ticks per sched_tick() */
static int rrticks __read_mostly;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 *
 * There's no need to lock anywhere in this routine, as it's
 * CPU-local and runs at IPL_SCHED (called from clock interrupt).
 */
/* ARGSUSED */
void
sched_tick(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	lwp_t *l;

	spc->spc_ticks = rrticks;

	if (CURCPU_IDLE_P()) {
		cpu_need_resched(ci, 0);
		return;
	}
	l = ci->ci_data.cpu_onproc;
	if (l == NULL) {
		return;
	}
	switch (l->l_class) {
	case SCHED_FIFO:
		/* No timeslicing for FIFO jobs. */
		break;
	case SCHED_RR:
		/* Force it into mi_switch() to look for other jobs to run. */
		cpu_need_resched(ci, RESCHED_KPREEMPT);
		break;
	default:
		if (spc->spc_flags & SPCF_SHOULDYIELD) {
			/*
			 * Process is stuck in kernel somewhere, probably
			 * due to buggy or inefficient code.  Force a
			 * kernel preemption.
			 */
			cpu_need_resched(ci, RESCHED_KPREEMPT);
		} else if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
			cpu_need_resched(ci, 0);
		} else {
			spc->spc_flags |= SPCF_SEENRR;
		}
		break;
	}
}
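
/*
 * Illustrative timeline (editor's addition, not from the original source):
 * with sched_tick() firing every hz/10 ticks, a SCHED_OTHER thread that
 * stays on the CPU sees roughly:
 *
 *	~100ms	first tick: SPCF_SEENRR is set
 *	~200ms	second tick: SPCF_SEENRR already set, so SPCF_SHOULDYIELD
 *		is set and a reschedule is requested
 *	~300ms	third tick: still on the CPU, so a kernel preemption is
 *		forced with RESCHED_KPREEMPT
 */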

/*
 * Why PRIO_MAX - 2? From setpriority(2):
 *
 *	prio is a value in the range -20 to 20.  The default priority is
 *	0; lower priorities cause more favorable scheduling.  A value of
 *	19 or 20 will schedule a process only when nothing at priority <=
 *	0 is runnable.
 *
 * This gives estcpu influence over 18 priority levels, and leaves nice
 * with 40 levels.  One way to think about it is that nice has 20 levels
 * either side of estcpu's 18.
 */
#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((PRIO_MAX - 2) << ESTCPU_SHIFT)
#define	ESTCPU_ACCUM	(1 << (ESTCPU_SHIFT - 1))
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
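
/*
 * Worked values (editor's addition, assuming PRIO_MAX == 20 as in
 * <sys/resource.h>): ESTCPU_MAX == 18 << 11 == 36864, so l_estcpu can
 * push the priority down by at most 18 levels, one level per
 * (1 << ESTCPU_SHIFT) == 2048 units.  ESTCPU_ACCUM == 1024 is half a
 * level, so two sched_schedclock() increments cost one level.
 */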

/*
 * The main parameter used by this algorithm is 'l_estcpu'. It is an estimate
 * of the recent CPU utilization of the thread.
 *
 * l_estcpu is:
 *  - increased each time the hardclock ticks and the thread is found to
 *    be executing, in sched_schedclock() called from hardclock()
 *  - decreased (filtered) on each sched tick, in sched_pstats_hook()
 * If the lwp is sleeping for more than a second, we don't touch l_estcpu: it
 * will be updated in sched_setrunnable() when the lwp wakes up, in burst mode
 * (i.e., we decay it n times in one go).
 *
 * Note that hardclock updates l_estcpu and l_cpticks independently.
 *
 * -----------------------------------------------------------------------------
 *
 * Here we describe how l_estcpu is decreased.
 *
 * Constants for digital decay (filter):
 *     90% of l_estcpu usage in (5 * loadavg) seconds
 *
 * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds. That is, we
 * want to compute a value of decay such that the following loop:
 *     for (i = 0; i < (5 * loadavg); i++)
 *         l_estcpu *= decay;
 * will result in
 *     l_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 *     decay ** (5 * loadavg) ~= .1
 *
 * And finally, the corresponding value of decay we're using is:
 *     decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * -----------------------------------------------------------------------------
 *
 * Now, let's prove that the value of decay stated above will always fulfill
 * the equation:
 *     decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *     b = 2 * loadavg
 * then
 *     decay = b / (b + 1)
 *
 * We now need to prove two things:
 *     1) Given [factor ** (5 * loadavg) =~ .1], prove [factor == b/(b+1)].
 *     2) Given [b/(b+1) ** power =~ .1], prove [power == (5 * loadavg)].
 *
 * Facts:
 *   * For x real: exp(x) = x**0/0! + x**1/1! + x**2/2! + ...
 *     Therefore, for x close to zero, exp(x) =~ 1 + x.
 *     In turn, for b large enough, exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *
 *   * For b large enough, (b-1)/b =~ b/(b+1).
 *
 *   * For x belonging to [-1, 1): ln(1-x) = - x - x**2/2 - x**3/3 - ...
 *     Therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *
 *   * ln(0.1) =~ -2.30
 *
 * Proof of (1):
 *     factor ** (5 * loadavg) =~ 0.1
 *  => ln(factor) =~ -2.30 / (5 * loadavg)
 *  => factor =~ exp(-1 / ((5 / 2.30) * loadavg))
 *            =~ exp(-1 / (2 * loadavg))
 *            =~ exp(-1 / b)
 *            =~ (b - 1) / b
 *            =~ b / (b + 1)
 *            =~ (2 * loadavg) / ((2 * loadavg) + 1)
 *
 * Proof of (2):
 *     (b / (b + 1)) ** power =~ .1
 *  => power * ln(b / (b + 1)) =~ -2.30
 *  => power * (-1 / (b + 1)) =~ -2.30
 *  => power =~ 2.30 * (b + 1)
 *  => power =~ 4.60 * loadavg + 2.30
 *  => power =~ 5 * loadavg
 *
 * Conclusion: decay = (2 * loadavg) / (2 * loadavg + 1)
 */
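
/*
 * Numeric sanity check (editor's addition): for loadavg == 1,
 * decay == 2/3 and (2/3)^5 =~ 0.13, close to the 0.1 target; for
 * loadavg == 5, decay == 10/11 and (10/11)^25 =~ 0.092.  decay_cpu()
 * below computes the same thing in fixed point:
 * estcpu * loadfac / (loadfac + FSCALE), with loadfac == 2 * loadavg.
 */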

/* See calculations above */
#define	loadfactor(loadavg)	(2 * (loadavg))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64-bit arithmetic */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}

static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	/*
	 * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
	 * if we slept for at least seven times the loadfactor, we will decay
	 * l_estcpu to less than (1 << ESTCPU_SHIFT), and therefore we can
	 * return zero directly.
	 *
	 * Note that our ESTCPU_MAX is actually much smaller than
	 * (255 << ESTCPU_SHIFT).
	 */
	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
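
#if 0
/*
 * Editor's illustration (not compiled): how the helpers above behave
 * for a loadavg of 1.00, i.e. loadfac == 2 * FSCALE.  The start value
 * is arbitrary.
 */
static void
decay_example(void)
{
	fixpt_t loadfac = 2 * FSCALE;		/* loadavg == 1.00 */
	fixpt_t estcpu = 10 << ESTCPU_SHIFT;	/* hypothetical value */

	/* One second of decay: estcpu becomes roughly estcpu * 2/3. */
	estcpu = decay_cpu(loadfac, estcpu);

	/*
	 * A 14-second sleep hits the early exit above, since
	 * (14 << FSHIFT) >= 7 * (2 * FSCALE), so the result is 0.
	 * This is safe because even (255 << ESTCPU_SHIFT) * (2/3)^14
	 * is below (1 << ESTCPU_SHIFT), less than one priority level.
	 */
	estcpu = decay_cpu_batch(loadfac, estcpu, 14);
}
#endif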

/*
 * sched_pstats_hook:
 *
 * Periodically called from sched_pstats(); used to recalculate priorities.
 */
void
sched_pstats_hook(struct lwp *l, int batch)
{
	fixpt_t loadfac;

	/*
	 * If the LWP has slept an entire second, stop recalculating
	 * its priority until it wakes up.
	 */
	KASSERT(lwp_locked(l, NULL));
	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED) {
		if (l->l_slptime > 1) {
			return;
		}
	}

	loadfac = loadfactor(averunnable.ldavg[0]);
	l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
	resetpriority(l);
}

/*
 * Recalculate the priority of an LWP after it has slept for a while.
 */
static void
updatepri(struct lwp *l)
{
	fixpt_t loadfac;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in sched_pstats */
	l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
	resetpriority(l);
}

void
sched_rqinit(void)
{

}

void
sched_setrunnable(struct lwp *l)
{

	if (l->l_slptime > 1)
		updatepri(l);
}

void
sched_nice(struct proc *p, int n)
{
	struct lwp *l;

	KASSERT(mutex_owned(p->p_lock));

	p->p_nice = n;
	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		resetpriority(l);
		lwp_unlock(l);
	}
}

/*
 * Recompute the priority of an LWP.  Arrange to reschedule if
 * the resulting priority is better than that of the current LWP.
 */
static void
resetpriority(struct lwp *l)
{
	pri_t pri;
	struct proc *p = l->l_proc;

	KASSERT(lwp_locked(l, NULL));

	if (l->l_class != SCHED_OTHER)
		return;

	/* See comments above ESTCPU_SHIFT definition. */
	pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
	pri = imax(pri, 0);
	if (pri != l->l_priority)
		lwp_changepri(l, pri);
}
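
/*
 * Reading the formula above (editor's addition): starting from
 * (PRI_KERNEL - 1), every (1 << ESTCPU_SHIFT) == 2048 units of
 * l_estcpu and every unit of p_nice each cost one priority level,
 * with the result clamped at zero.  For example, a thread with
 * l_estcpu == 4 << ESTCPU_SHIFT runs four levels below an otherwise
 * identical thread with zero l_estcpu.
 */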

/*
 * We adjust the priority of the current LWP.  The priority of a LWP
 * gets worse as it accumulates CPU time.  The CPU usage estimator (l_estcpu)
 * is increased here.  The formula for computing priorities will compute a
 * different value each time l_estcpu increases. This can cause a switch,
 * but unless the priority crosses a PPQ boundary the actual queue will not
 * change.  The CPU usage estimator ramps up quite quickly when the process
 * is running (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle is
 * that the system will 90% forget that the process used a lot of CPU time
 * in (5 * loadavg) seconds.  This causes the system to favor processes which
 * haven't run much recently, and to round-robin among other processes.
 */
void
sched_schedclock(struct lwp *l)
{

	if (l->l_class != SCHED_OTHER)
		return;

	KASSERT(!CURCPU_IDLE_P());
	l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
	lwp_lock(l);
	resetpriority(l);
	lwp_unlock(l);
}
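
/*
 * Rate sketch (editor's addition): each call adds ESTCPU_ACCUM, half
 * of one priority level, so a thread that stays runnable loses roughly
 * one level per two calls until ESTCPULIM caps l_estcpu at ESTCPU_MAX.
 */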

/*
 * sched_proc_fork:
 *
 *	Inherit the parent's scheduler history.
 */
void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	lwp_t *pl;

	KASSERT(mutex_owned(parent->p_lock));

	pl = LIST_FIRST(&parent->p_lwps);
	child->p_estcpu_inherited = pl->l_estcpu;
	child->p_forktime = sched_pstats_ticks;
}

/*
 * sched_proc_exit:
 *
 *	Chargeback parents for the sins of their children.
 */
void
sched_proc_exit(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;
	lwp_t *pl, *cl;

	/* XXX Only if parent != init?? */

	mutex_enter(parent->p_lock);
	pl = LIST_FIRST(&parent->p_lwps);
	cl = LIST_FIRST(&child->p_lwps);
	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    sched_pstats_ticks - child->p_forktime);
	if (cl->l_estcpu > estcpu) {
		lwp_lock(pl);
		pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
		lwp_unlock(pl);
	}
	mutex_exit(parent->p_lock);
}
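
/*
 * In other words (editor's addition): p_estcpu_inherited is first
 * decayed over the child's lifetime, exactly as it would have decayed
 * had it stayed with the parent; only the child's surplus over that
 * decayed figure, i.e. CPU time the child itself accumulated, is
 * charged back to the parent.
 */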

void
sched_wakeup(struct lwp *l)
{

}

void
sched_slept(struct lwp *l)
{

}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	l2->l_estcpu = l1->l_estcpu;
}

void
sched_lwp_collect(struct lwp *t)
{
	lwp_t *l;

	/* Absorb estcpu value of collected LWP. */
	l = curlwp;
	lwp_lock(l);
	l->l_estcpu += t->l_estcpu;
	lwp_unlock(l);
}

void
sched_oncpu(lwp_t *l)
{

}

void
sched_newts(lwp_t *l)
{

}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rrticks);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}
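
/*
 * Example (editor's addition, assuming the usual tick-to-millisecond
 * conversion in hztoms()): with hz == 100, rrticks == hz / 10 == 10
 * and hztoms(10) reports a 100ms quantum through the kern.sched.rtts
 * node created below.
 */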

SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	rrticks = hz / 10;

	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("4.4BSD"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(NULL, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
}