/*	$NetBSD: sched_m2.c,v 1.18 2008/01/15 18:41:37 rmind Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.18 2008/01/15 18:41:37 rmind Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/pset.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)

const int	schedppq = 1;

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
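
/*
 * Note: each priority has one bit in a runqueue's r_bitmap[]: priority p
 * lives in word (p >> BITMAP_SHIFT), at bit (BITMAP_MSB >> (p & BITMAP_MASK)).
 * Higher priorities thus occupy less significant bits within a word, so
 * ffs(3) on a non-zero word finds the highest priority present (see
 * sched_dequeue()).
 */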

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR

static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int	balance_period;		/* Balance period */
static struct callout balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp)	q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	kmutex_t	r_rq_mutex;
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01
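
/*
 * Note: SL_BATCH marks a thread that sched_pstats_hook() has observed to be
 * CPU-bound (its run time sum exceeded its sleep time sum).  Batch threads
 * do not receive the priority boost in sched_slept().
 */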

/* Pool of the scheduler-specific structures for threads */
static struct pool	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms */
	balance_period = mstohz(300);		/* ~300ms */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	pool_init(&sil_pool, sizeof(sched_info_lwp_t), 0, 0, 0,
	    "lwpsd", &pool_allocator_nointr, IPL_NONE);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	/* Initialize the scheduler structure of the primary LWP */
	lwp0.l_mutex = &ci->ci_schedstate.spc_lwplock;
	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs) */
	min_catch = min(ilog2(ncpu), 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	/*
	 * Allocate the run queue.
	 * XXX: Estimate cache behaviour more..
	 */
	size = roundup(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_SLEEP);
	if (rq_ptr == NULL) {
		panic("scheduler: could not allocate the runqueue");
	}
	/* XXX: Save the original pointer for future.. */
	ci_rq = (void *)(roundup((intptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	mutex_init(&ci_rq->r_rq_mutex, MUTEX_DEFAULT, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = 0;

	ci->ci_schedstate.spc_sched_info = ci_rq;
	ci->ci_schedstate.spc_mutex = &ci_rq->r_rq_mutex;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
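
/*
 * Note on the mapping above, as a worked example (assuming hz = 100, a
 * common default): min_ts = 5 ticks (~50ms) and max_ts = 15 ticks (~150ms),
 * so ts_map[0] = 15 ticks and ts_map[PRI_HIGHEST_TS] = 5 ticks, decreasing
 * roughly linearly in between - higher-priority time-sharing threads get
 * shorter slices, but run more often.  high_pri[] compresses the whole
 * time-sharing range into its top PRI_HTS_RANGE priorities and is used to
 * boost threads that slept or waited for a long time.
 */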

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	KASSERT(l2->l_sched_info == NULL);
	l2->l_sched_info = pool_get(&sil_pool, PR_WAITOK);
	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_put(&sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{

	/* TODO: implement as SCHED_IA */
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true)) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else if (sil->sl_lrtime == 0)
		sil->sl_lrtime = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark the bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of the highest priority in the runqueue,
	 * if the priority of this thread is higher.
	 */
	if (eprio > ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	KASSERT(eprio <= ci_rq->r_highest_pri);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark the bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of the highest priority in the runqueue,
		 * in case this was the last thread in the queue of the
		 * highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

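		/*
		 * Scan the bitmap downwards, starting from the word that
		 * held eprio.  Since higher priorities map to less
		 * significant bits (see the note near the BITMAP_* macros),
		 * ffs(3) on a non-zero word yields the highest priority
		 * still present in the runqueue.
		 */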
		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If nothing was found - set the lowest value */
		ci_rq->r_highest_pri = 0;
	}
}

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase its priority; it will then run with a
	 * shorter time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS &&
	    (sil->sl_flags & SL_BATCH) == 0) {
		KASSERT(l->l_class == SCHED_OTHER);
		l->l_priority++;
	}
}
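
/*
 * Note: the boost above is the counterpart of the decrease in sched_tick() -
 * an interactive (non-batch) time-sharing thread gains one priority step
 * each time it goes to sleep, and loses one step each time it consumes a
 * full time-quantum.
 */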

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update the sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread was sleeping for a second or more - set a high priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_priority = high_pri[l->l_priority];

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	pri_t prio;
	bool batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound (batch) if its run time sum exceeds
	 * its sleep time sum.  Also check whether this is the first time it
	 * has been seen as CPU-bound: the priority penalty below is applied
	 * only from the second consecutive pass onwards.
	 */
	batch = (sil->sl_rtsum > sil->sl_slpsum);
	if (batch) {
		if ((sil->sl_flags & SL_BATCH) == 0)
			batch = false;
		sil->sl_flags |= SL_BATCH;
	} else
		sil->sl_flags &= ~SL_BATCH;

	/* Reset the time sums */
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/* Estimate only the threads on the time-sharing queue */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;
	KASSERT(l->l_class == SCHED_OTHER);

	/* If it is CPU-bound and not for the first time - decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread has not run for a second or more - set a high priority */
	if (l->l_stat == LSRUN) {
		if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if the priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Estimate if LWP is cache-hot */
static inline bool
lwp_cache_hot(const struct lwp *l)
{
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (l->l_slptime || sil->sl_lrtime == 0)
		return false;

	return (hardclock_ticks - sil->sl_lrtime < cacheht_time);
}
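
/*
 * Note: a thread that has never run (sl_lrtime == 0), or that has already
 * accumulated sleep time, is never considered cache-hot; otherwise it is
 * cache-hot if it last ran less than cacheht_time ticks ago (~5 ms with the
 * defaults set in sched_rqinit()).
 */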

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, struct cpu_info *ci)
{
	const struct schedstate_percpu *spc = &ci->ci_schedstate;

	/* CPU is offline */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
		return false;

	/* Affinity bind */
	if (__predict_false(l->l_flag & LW_AFFINITY))
		return CPU_ISSET(cpu_index(ci), &l->l_affinity);

	/* Processor-set */
	return (spc->spc_psid == l->l_psid);
}

/*
 * Estimate whether the LWP should migrate to another CPU and return
 * the chosen CPU.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* If the thread is strictly bound, do not estimate other CPUs */
	if (l->l_flag & LW_BOUND)
		return ci;

	/* The runqueue of this thread's CPU is empty - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);

	/* Stay if the thread is cache-hot */
	if (__predict_true(l->l_stat != LSIDL) &&
	    lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
		return ci;

	/* Run on the current CPU if the priority of the thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU with the lowest-priority thread.  If the
	 * priorities are equal, prefer the CPU with the lower count
	 * of threads.
	 */
	tci = l->l_cpu;
	lpri = PRI_COUNT;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri > lpri)
			continue;

		if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (!sched_migratable(l, ci))
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}
	return tci;
}

/*
 * Try to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.  To avoid a deadlock, take the locks
	 * in a fixed order (by CPU address); if the remote lock cannot be
	 * taken at once, drop our own lock, take both in order and re-check
	 * the local runqueue.
	 */
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		/* Check the first and next result from the queue */
		if (l == NULL)
			break;

		/* Skip threads that are not allowed to migrate */
		if ((l->l_flag & LW_SYSTEM) || lwp_cache_hot(l) ||
		    !sched_migratable(l, curci)) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if the chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the counting locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Exponentially decaying average of the migratable threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints)
		return true;
#endif

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return (ci_rq->r_count - ci_rq->r_mcount);

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus no locking is needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	const sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum and continue running,
		 * if the thread runs with the FIFO real-time policy.
		 */
		KASSERT(l->l_priority > PRI_HIGHEST_TS);
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease its
		 * priority; it will then run with a longer time-quantum.
		 */
		KASSERT(l->l_priority <= PRI_HIGHEST_TS);
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher-priority threads or other threads in the same
	 * queue, mark that the thread should yield; otherwise, continue
	 * running.
	 */
	if (lwp_eprio(l) <= ci_rq->r_highest_pri || l->l_target_cpu) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_rtts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int rttsms = hztoms(rt_ts);

	node = *rnode;
	node.sysctl_data = &rttsms;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_INT, "rtts",
		SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
		sysctl_sched_rtts, 0, NULL, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of the threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}
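
/*
 * The tunables above live under the kern.sched sysctl node.  A usage sketch
 * (assuming the standard sysctl(8) utility; mints/maxts take milliseconds,
 * the MULTIPROCESSOR tunables take ticks):
 *
 *	sysctl kern.sched.name kern.sched.rtts
 *	sysctl -w kern.sched.maxts=200
 *	sysctl -w kern.sched.mints=20
 */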

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)(" %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif
