/*	$NetBSD: sched_m2.c,v 1.1 2007/10/09 19:00:15 rmind Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.1 2007/10/09 19:00:15 rmind Exp $");

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

#include <machine/cpu.h>

/*
 * XXX: Some definitions below will disappear
 * XXX: with the merge of the vmlocking branch.
 */
#define	PRI_MAX		MAXPRI
#define	PRI_COUNT	(PRI_MAX + 1)			/* 0 .. 127 -> 128 */
#define	PRI_RT_COUNT	(50)				/* 0 .. 49  -> 50 */
#define	PRI_TS_COUNT	(PRI_COUNT - PRI_RT_COUNT)	/* 50 .. 127 -> 78 */

#define	PRI_DEFAULT	70				/* 70 */
#define	PRI_REALTIME	50				/* 50 */
#define	PRI_HTS_RANGE	10				/* 50 .. 60 -> 10 */

/*
 * Bits per map.
 */
#define	BITMAP_SHIFT	5	/* 32 bits */
#define	BITMAP_SIZE	(PRI_COUNT >> BITMAP_SHIFT)
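
/*
 * For reference: with 32-bit words, a priority p maps to word
 * (p >> BITMAP_SHIFT) and bit (p & 31) within it.  For example,
 * PRI_DEFAULT (70) is bit 6 of r_bitmap[2], since 70 = 2 * 32 + 6.
 */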

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int	balance_period;		/* Balance period */
static struct callout balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

#define	CACHE_HOT(sil)	((sil)->sl_lrtime &&				\
    (hardclock_ticks - (sil)->sl_lrtime < cacheht_time))
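
/*
 * Note that a zero sl_lrtime means the LWP has not run recently (it is
 * cleared in sched_enqueue() when enqueueing without a context switch),
 * so such an LWP is never considered cache-hot.
 */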

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	kmutex_t	r_rq_mutex;
	uint32_t	r_bitmap[BITMAP_SIZE];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01

/* Pool of the scheduler-specific structures for threads */
static struct pool	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms */
	balance_period = mstohz(300);		/* ~300ms */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	pool_init(&sil_pool, sizeof(sched_info_lwp_t), 0, 0, 0,
	    "lwpsd", &pool_allocator_nointr, IPL_NONE);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	/* Initialize the scheduler structure of the primary LWP */
	lwp0.l_mutex = &ci->ci_schedstate.spc_lwplock;
	sched_lwp_fork(&lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs) */
	min_catch = min(ffs(ncpu) - 1, 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	/*
	 * Allocate the run queue.
	 * XXX: Estimate cache behaviour more..
	 */
	size = roundup(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_NOSLEEP);
	if (rq_ptr == NULL) {
		panic("scheduler: could not allocate the runqueue");
	}
	/* XXX: Save the original pointer for future.. */
	ci_rq = (void *)(roundup((intptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	mutex_init(&ci_rq->r_rq_mutex, MUTEX_SPIN, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = PRI_MAX;

	ci->ci_schedstate.spc_sched_info = ci_rq;
	ci->ci_schedstate.spc_mutex = &ci_rq->r_rq_mutex;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;
	u_int i;

	for (p = 0; p < PRI_REALTIME; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}

	for (p = PRI_REALTIME, i = 0; p < PRI_COUNT; p++, i++) {
		ts_map[p] = min_ts +
		    (i * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = PRI_REALTIME + (i * PRI_HTS_RANGE /
		    (PRI_MAX - PRI_REALTIME));
	}
}
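
/*
 * A worked example of the interpolation above, assuming hz = 100 (so
 * that min_ts = 5 and max_ts = 15 ticks): for PRI_DEFAULT (70), i = 20,
 * hence ts_map[70] = 5 + (20 * 100 / 77) * (15 - 5) / 100 = 7 ticks
 * (~70 ms), and high_pri[70] = 50 + 20 * 10 / 77 = 52.
 */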

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l)
{

	KASSERT(l->l_sched_info == NULL);
	l->l_sched_info = pool_get(&sil_pool, PR_WAITOK);
	memset(l->l_sched_info, 0, sizeof(sched_info_lwp_t));
	if (l->l_usrpri >= PRI_REALTIME)	/* XXX: For now only.. */
		l->l_usrpri = l->l_priority = PRI_DEFAULT;
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_put(&sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{
	int nprio;
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_stmutex));

	p->p_nice = prio;
	nprio = max(PRI_DEFAULT + p->p_nice, PRI_REALTIME);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		lwp_changepri(l, nprio);
		lwp_unlock(l);
	}
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio < PRI_REALTIME) ?
	    &ci_rq->r_rt_queue[prio].q_head :
	    &ci_rq->r_ts_queue[prio - PRI_REALTIME].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (swtch == true) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else
		sil->sl_lrtime = 0;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = eprio - (i << BITMAP_SHIFT);
		KASSERT((ci_rq->r_bitmap[i] & (1 << q)) == 0);
		ci_rq->r_bitmap[i] |= 1 << q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of highest priority in the runqueue,
	 * if priority of this thread is higher.
	 */
	if (eprio < ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
	KASSERT(ci_rq->r_highest_pri <= eprio);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = eprio - (i << BITMAP_SHIFT);
		KASSERT((ci_rq->r_bitmap[i] & (1 << q)) != 0);
		ci_rq->r_bitmap[i] &= ~(1 << q);

		/*
		 * Update the value of the highest priority in the runqueue,
		 * in case this was the last thread at the highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

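		/*
		 * Scan the bitmap upwards.  ffs() returns the 1-based index
		 * of the least significant set bit, thus the numerically
		 * smallest (i.e. highest) remaining priority is
		 * (i << BITMAP_SHIFT) + q - 1.
		 */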
		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + q - 1;
				return;
			}
		} while (++i < BITMAP_SIZE);

		/* If not found - set the maximal value */
		ci_rq->r_highest_pri = PRI_MAX;
	}
}

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is not real-time and the batch flag is not set,
	 * increase the priority, and run with a lower time-quantum.
	 */
	if (l->l_usrpri > PRI_REALTIME && (sil->sl_flags & SL_BATCH) == 0)
		l->l_usrpri--;
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update the sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread slept for a second or more, set a high priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_usrpri = l->l_priority = high_pri[l->l_usrpri];
	KASSERT(sil->sl_slept > 0);

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/*
	 * Mark the thread as CPU-bound if its run-time sum exceeds its
	 * sleep-time sum.  If it is found CPU-bound more than once in a
	 * row, decrease its priority.
	 */
	if (sil->sl_rtsum > sil->sl_slpsum) {
		if ((sil->sl_flags & SL_BATCH) && (l->l_usrpri < PRI_MAX))
			l->l_usrpri++;
		sil->sl_flags |= SL_BATCH;
	} else {
		sil->sl_flags &= ~SL_BATCH;
	}
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/*
	 * Consider only threads on the time-sharing run queue and
	 * ignore the highest time-sharing priority.
	 */
	if (l->l_stat != LSRUN || l->l_usrpri <= PRI_REALTIME)
		return;

	/* If the thread has not run for a second or more, set a high priority */
	if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
		lwp_changepri(l, high_pri[l->l_usrpri]);
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, const struct cpu_info *ci)
{

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return false;

	if ((l->l_flag & LW_BOUND) == 0)
		return true;
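	/*
	 * XXX: the processor-set check below is disabled until processor
	 * sets are supported, hence a bound LWP is never migratable.
	 */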
#if 0
	return cpu_in_pset(ci, l->l_psid);
#else
	return false;
#endif
}

/*
 * Estimate whether the LWP should migrate to another CPU and choose the
 * best one.  Return the chosen CPU, possibly the LWP's current one.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci = NULL;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* CPU of this thread is idling - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);
	sil = l->l_sched_info;

	/* Stay if thread is cache-hot */
	if (l->l_stat == LSSLEEP && l->l_slptime <= 1 &&
	    CACHE_HOT(sil) && eprio <= spc->spc_curpriority)
		return ci;

	/* Run on current CPU if priority of thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio < spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU running the lowest-priority thread (note that
	 * a numerically larger value means a lower priority).  On equal
	 * priority, prefer the CPU with the lower count of threads.
	 */
	lpri = 0;
	ci_rq = NULL;
	tci = l->l_cpu;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = min(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri < lpri)
			continue;

		if (pri == lpri && ci_rq && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (sched_migratable(l, ci) == false)
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}

	return tci;
}

/*
 * Tries to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.  To avoid a deadlock, always take
	 * the mutex with the lower address first; otherwise drop our own
	 * lock, take the remote one, and then re-take ours.
	 */
	if (curci->ci_schedstate.spc_mutex < ci->ci_schedstate.spc_mutex) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		/* If work appeared on our own runqueue meanwhile, give up */
		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		sched_info_lwp_t *sil;

		/* Check the first and next result from the queue */
		if (l == NULL)
			break;

		/* Look for a thread that is allowed to migrate */
		sil = l->l_sched_info;
		if ((l->l_flag & LW_SYSTEM) || CACHE_HOT(sil) ||
		    sched_migratable(l, curci) == false) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the counting locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Exponential moving average of the migratable thread count */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch a thread from another CPU */
	if (spc->spc_flags & SPCF_OFFLINE) {
		if (ci_rq->r_mcount == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return ci_rq->r_mcount;

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus the locking is not needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Decrease the priority, and run with a higher time-quantum */
	if (!CURCPU_IDLE_P() && l->l_policy == SCHED_OTHER) {
		if (l->l_usrpri >= PRI_REALTIME) {
			l->l_usrpri = min(l->l_usrpri + 1, PRI_MAX);
			l->l_priority = l->l_usrpri;
		}
	}

	/*
	 * Update the time-quantum, and continue running,
	 * if thread runs on FIFO real-time policy.
	 */
	if (l->l_policy == SCHED_FIFO) {
		spc->spc_ticks = sil->sl_timeslice;
		return;
	}

	/*
	 * If there are higher priority threads, or threads at the same
	 * priority in the run queue, mark that the thread should yield;
	 * otherwise, continue running.
	 */
	if (CURCPU_IDLE_P() || lwp_eprio(l) >= ci_rq->r_highest_pri) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 1 || newsize > hz || newsize >= hztoms(max_ts))
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = mstohz(newsize);
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (newsize < 10 || newsize > hz || newsize <= hztoms(min_ts))
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = mstohz(newsize);
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = 0;
		do {
			int b;
			b = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(b), b);
		} while (++i < BITMAP_SIZE);
	}

	(*pr)("   %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "UPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, l->l_usrpri,
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif	/* defined(DDB) */