/*	$NetBSD: sched_m2.c,v 1.6.2.3 2007/11/04 21:03:33 jmcneill Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.6.2.3 2007/11/04 21:03:33 jmcneill Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * XXX: Some definitions below will disappear
 * XXX: with the merge of the vmlocking branch.
 */
#define	PRI_MAX		MAXPRI
#define	PRI_COUNT	(PRI_MAX + 1)			/* 0 .. 127 -> 128 */
#define	PRI_RT_COUNT	(50)				/* 0 .. 49  -> 50 */
#define	PRI_TS_COUNT	(PRI_COUNT - PRI_RT_COUNT)	/* 50 .. 127 -> 78 */

#define	PRI_DEFAULT	70				/* 70 */
#define	PRI_REALTIME	50				/* 50 */
#define	PRI_HTS_RANGE	10				/* 50 .. 60 -> 10 */

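/*
 * Note: in this scheduler a numerically lower value means a higher
 * priority: 0 is the highest priority and PRI_MAX (127) the lowest.
 * This is why "higher priority" comparisons below use the '<' operator.
 */
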
/*
 * Bits per bitmap word.
 */
#define	BITMAP_SHIFT	5	/* 32 bits */
#define	BITMAP_SIZE	(PRI_COUNT >> BITMAP_SHIFT)
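
/*
 * With PRI_COUNT = 128 and 32-bit words the bitmap consists of 4 words;
 * bit (prio % 32) of word (prio / 32) is set whenever the queue for that
 * priority is non-empty (see sched_enqueue()/sched_dequeue()).
 */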

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int	balance_period;		/* Balance period */
static struct callout balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

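/* An LWP is cache-hot if it last ran less than cacheht_time ticks ago */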
#define	CACHE_HOT(sil)	(sil->sl_lrtime &&	\
    (hardclock_ticks - sil->sl_lrtime < cacheht_time))

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	kmutex_t	r_rq_mutex;
	uint32_t	r_bitmap[BITMAP_SIZE];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01
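/*
 * SL_BATCH marks a CPU-bound (batch) thread: sched_pstats_hook() sets it
 * when the accumulated run time exceeds the accumulated sleep time, and
 * such threads do not get the priority boost on sleep (see sched_slept()).
 */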

/* Pool of the scheduler-specific structures for threads */
static struct pool	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms */
	balance_period = mstohz(300);		/* ~300ms */
	/* Effectively disables catching until sched_setup() runs */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	pool_init(&sil_pool, sizeof(sched_info_lwp_t), 0, 0, 0,
	    "lwpsd", &pool_allocator_nointr, IPL_NONE);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	/* Initialize the scheduler structure of the primary LWP */
	lwp0.l_mutex = &ci->ci_schedstate.spc_lwplock;
	sched_lwp_fork(&lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs), capped at 4 */
	min_catch = min(ilog2(ncpu), 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	/*
	 * Allocate the run queue.
	 * XXX: Estimate cache behaviour more..
	 */
	size = roundup(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_NOSLEEP);
	if (rq_ptr == NULL) {
		panic("scheduler: could not allocate the runqueue");
	}
	/* XXX: Save the original pointer for future.. */
	ci_rq = (void *)(roundup((intptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	mutex_init(&ci_rq->r_rq_mutex, MUTEX_SPIN, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = PRI_MAX;

	ci->ci_schedstate.spc_sched_info = ci_rq;
	ci->ci_schedstate.spc_mutex = &ci_rq->r_rq_mutex;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;
	u_int i;

	for (p = 0; p < PRI_REALTIME; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}

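	/*
	 * Time-sharing range: time-slices grow linearly from min_ts at
	 * priority PRI_REALTIME to max_ts at PRI_MAX, and high_pri maps
	 * each priority into the 50 .. 60 "high" range.  For example, with
	 * hz = 100 (min_ts = 5, max_ts = 15 ticks), priority 50 gets a
	 * 5-tick slice and priority 127 gets a 15-tick slice.
	 */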
	for (p = PRI_REALTIME, i = 0; p < PRI_COUNT; p++, i++) {
		ts_map[p] = min_ts +
		    (i * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = PRI_REALTIME + (i * PRI_HTS_RANGE /
		    (PRI_MAX - PRI_REALTIME));
	}
}

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l)
{

	KASSERT(l->l_sched_info == NULL);
	l->l_sched_info = pool_get(&sil_pool, PR_WAITOK);
	memset(l->l_sched_info, 0, sizeof(sched_info_lwp_t));
	if (l->l_usrpri >= PRI_REALTIME)	/* XXX: For now only.. */
		l->l_usrpri = l->l_priority = PRI_DEFAULT;
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_put(&sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{
	int nprio;
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	p->p_nice = prio;
	nprio = max(PRI_DEFAULT + p->p_nice, PRI_REALTIME);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		lwp_changepri(l, nprio);
		lwp_unlock(l);
	}
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

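/*
 * Map a priority to its queue: real-time priorities (0 .. PRI_REALTIME - 1)
 * index r_rt_queue, time-sharing priorities (PRI_REALTIME .. PRI_MAX)
 * index r_ts_queue.
 */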
static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio < PRI_REALTIME) ?
	    &ci_rq->r_rt_queue[prio].q_head :
	    &ci_rq->r_ts_queue[prio - PRI_REALTIME].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (swtch == true) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else if (sil->sl_lrtime == 0)
		sil->sl_lrtime = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark the bit for this priority level */
		i = eprio >> BITMAP_SHIFT;
		q = eprio - (i << BITMAP_SHIFT);
		KASSERT((ci_rq->r_bitmap[i] & (1 << q)) == 0);
		ci_rq->r_bitmap[i] |= 1 << q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the runqueue's highest priority if this thread's
	 * priority is higher.
	 */
	if (eprio < ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
	KASSERT(ci_rq->r_highest_pri <= eprio);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark the bit for this priority level */
		i = eprio >> BITMAP_SHIFT;
		q = eprio - (i << BITMAP_SHIFT);
		KASSERT((ci_rq->r_bitmap[i] & (1 << q)) != 0);
		ci_rq->r_bitmap[i] &= ~(1 << q);

		/*
		 * Update the runqueue's highest priority if this was
		 * the last thread in the highest-priority queue.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

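		/*
		 * Scan the bitmap upwards for the next non-empty queue.
		 * ffs() returns a 1-based bit index, hence the "- 1".
		 */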
		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + q - 1;
				return;
			}
		} while (++i < BITMAP_SIZE);

		/* If not found - set the maximal value */
		ci_rq->r_highest_pri = PRI_MAX;
	}
}

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is not real-time and the batch flag is not set,
	 * increase the priority and run with a lower time-quantum.
	 */
	if (l->l_usrpri > PRI_REALTIME && (sil->sl_flags & SL_BATCH) == 0)
		l->l_usrpri--;
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update the sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread slept for a second or more, raise its priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_usrpri = l->l_priority = high_pri[l->l_usrpri];

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/*
	 * Mark the thread as CPU-bound (batch) if its accumulated run time
	 * exceeds its accumulated sleep time.  If it was already marked as
	 * CPU-bound, decrease its priority.
	 */
	if (sil->sl_rtsum > sil->sl_slpsum) {
		if ((sil->sl_flags & SL_BATCH) && (l->l_usrpri < PRI_MAX))
			l->l_usrpri++;
		sil->sl_flags |= SL_BATCH;
	} else {
		sil->sl_flags &= ~SL_BATCH;
	}
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/*
	 * Only consider threads that are on a time-sharing run queue;
	 * also, ignore the highest time-sharing priority.
	 */
	if (l->l_stat != LSRUN || l->l_usrpri <= PRI_REALTIME)
		return;

	/* If the thread has not run for a second or more, raise its priority */
	if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
		lwp_changepri(l, high_pri[l->l_usrpri]);
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, const struct cpu_info *ci)
{

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return false;

	if ((l->l_flag & LW_BOUND) == 0)
		return true;
#if 0
	return cpu_in_pset(ci, l->l_psid);
#else
	return false;
#endif
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Return the chosen CPU.
 */
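/*
 * The decision is made in this order: stay on the thread's own CPU if its
 * runqueue is empty or the thread is still cache-hot; run on the current
 * CPU if the thread would preempt it; otherwise pick the CPU running the
 * lowest-priority work.
 */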
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci = NULL;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* The runqueue of this thread's CPU is empty - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);
	sil = l->l_sched_info;

	/* Stay if the thread is cache-hot */
	if (l->l_stat == LSSLEEP && l->l_slptime <= 1 &&
	    CACHE_HOT(sil) && eprio <= spc->spc_curpriority)
		return ci;

	/* Run on the current CPU if the thread's priority is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio < spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU running the lowest-priority thread.  If the
	 * priorities are equal, prefer the CPU with fewer queued threads.
	 */
	lpri = 0;
	ci_rq = NULL;
	tci = l->l_cpu;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = min(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri < lpri)
			continue;

		if (pri == lpri && ci_rq && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (sched_migratable(l, ci) == false)
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}

	return tci;
}

/*
 * Try to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.
	 */
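	/*
	 * Runqueue locks are taken in ascending order of the CPU's
	 * cpu_info address.  If taking the remote lock would violate that
	 * order, try-lock it; on failure, drop the local lock and re-take
	 * both locks in the proper order.
	 */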
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		sched_info_lwp_t *sil;

		/* Check the first and next result from the queue */
		if (l == NULL)
			break;

		/* Look for threads that are allowed to migrate */
		sil = l->l_sched_info;
		if ((l->l_flag & LW_SYSTEM) || CACHE_HOT(sil) ||
		    sched_migratable(l, curci) == false) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if the chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Count without taking any locks */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Decaying average of migratable threads: (old + current) / 2 */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/*
	 * An offline CPU runs only bound threads.  Otherwise, if the
	 * runqueue is empty, try to catch a thread from another CPU.
	 */
	if (spc->spc_flags & SPCF_OFFLINE) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return (ci_rq->r_count - ci_rq->r_mcount);

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus no locking is needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_policy) {
	case SCHED_FIFO:
		/*
		 * If the thread runs under the FIFO real-time policy,
		 * refresh the time-quantum and continue running.
		 */
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/* Decrease the priority, and run with a higher time-quantum */
		if (l->l_usrpri < PRI_REALTIME)
			break;
		l->l_usrpri = min(l->l_usrpri + 1, PRI_MAX);
		l->l_priority = l->l_usrpri;
		break;
	}

	/*
	 * If there are higher-priority threads or threads in the same queue,
	 * mark that the thread should yield; otherwise, continue running.
	 */
	if (lwp_eprio(l) >= ci_rq->r_highest_pri) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of the threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = 0;
		do {
			int b;
			b = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(b), b);
		} while (++i < BITMAP_SIZE);
	}

	(*pr)(" %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "UPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, l->l_usrpri,
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif	/* defined(DDB) */