/*	$NetBSD: sched_m2.c,v 1.11 2007/11/07 03:07:14 rmind Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.11 2007/11/07 03:07:14 rmind Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>

/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)
#define	PRI_DEFAULT	(NPRI_USER >> 1)

const int schedppq = 1;

/*
 * Bits per bitmap word.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
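
/*
 * Note: within each bitmap word the highest priorities occupy the least
 * significant bits (sched_enqueue() shifts BITMAP_MSB right by the
 * priority offset), so ffs() on a word yields the highest priority
 * marked in it; see sched_dequeue().
 */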

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int	balance_period;		/* Balance period */
static struct callout balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

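/* An LWP is considered cache-hot if it last ran within cacheht_time ticks */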
#define	CACHE_HOT(sil)	(sil->sl_lrtime && \
    (hardclock_ticks - sil->sl_lrtime < cacheht_time))

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	kmutex_t	r_rq_mutex;
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01

/* Pool of the scheduler-specific structures for threads */
static struct pool	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50ms */
	max_ts = mstohz(150);			/* ~150ms */
	rt_ts = mstohz(100);			/* ~100ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms */
	balance_period = mstohz(300);		/* ~300ms */
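	/* Effectively disable LWP catching until sched_setup() runs */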
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	pool_init(&sil_pool, sizeof(sched_info_lwp_t), 0, 0, 0,
	    "lwpsd", &pool_allocator_nointr, IPL_NONE);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	/* Initialize the scheduler structure of the primary LWP */
	lwp0.l_mutex = &ci->ci_schedstate.spc_lwplock;
	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs), at most 4 */
	min_catch = min(ilog2(ncpu), 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	/*
	 * Allocate the run queue.
	 * XXX: Estimate cache behaviour more..
	 */
	size = roundup(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_NOSLEEP);
	if (rq_ptr == NULL) {
		panic("scheduler: could not allocate the runqueue");
	}
	/* XXX: Save the original pointer for future.. */
	ci_rq = (void *)(roundup((intptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	mutex_init(&ci_rq->r_rq_mutex, MUTEX_SPIN, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = 0;

	ci->ci_schedstate.spc_sched_info = ci_rq;
	ci->ci_schedstate.spc_mutex = &ci_rq->r_rq_mutex;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
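	/*
	 * The slice is interpolated linearly between max_ts at priority 0
	 * and min_ts at PRI_HIGHEST_TS; e.g. with the default 50..150 ms
	 * range, the default user priority gets roughly a 100 ms slice.
	 * high_pri[] maps each priority into the top PRI_HTS_RANGE band
	 * and is used when boosting slept or starved threads.
	 */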
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	KASSERT(l2->l_sched_info == NULL);
	l2->l_sched_info = pool_get(&sil_pool, PR_WAITOK);
	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
	if (l2->l_priority <= PRI_HIGHEST_TS)	/* XXX: For now only.. */
		l2->l_priority = PRI_DEFAULT;
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_put(&sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{
	int nprio;
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nice = prio;
	nprio = max(min(PRI_DEFAULT + p->p_nice, PRI_HIGHEST_TS), 0);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		lwp_changepri(l, nprio);
		lwp_unlock(l);
	}
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true)) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else if (sil->sl_lrtime == 0)
		sil->sl_lrtime = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of highest priority in the runqueue,
	 * if priority of this thread is higher.
	 */
	if (eprio > ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
	KASSERT(eprio <= ci_rq->r_highest_pri);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of the highest priority in the runqueue,
		 * in case this was the last thread in the queue of the
		 * highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

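		/*
		 * Scan the bitmap downwards for the next highest priority.
		 * ffs() finds the least significant set bit which, given
		 * the MSB-first encoding above, is the highest priority
		 * still marked in the word.
		 */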
		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If not found - set the lowest value */
		ci_rq->r_highest_pri = 0;
	}
}

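/*
 * Interactivity heuristic: each voluntary sleep raises the priority of a
 * non-batch time-sharing thread by one (sched_slept), while each expired
 * time-slice lowers it (sched_tick), so interactive threads drift towards
 * the high-priority/short-slice end of the range and CPU-bound threads
 * towards the low-priority/long-slice end.
 */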
void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is in the time-sharing queue and the batch flag is
	 * not set, increase the priority, and run with a lower time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS && (sil->sl_flags & SL_BATCH) == 0) {
		KASSERT(l->l_class == SCHED_OTHER);
		l->l_priority++;
	}
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread slept for a second or more - set a high priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_priority = high_pri[l->l_priority];

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	pri_t prio;
	bool batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound (batch) if its run time exceeds its
	 * sleep time.  Also check whether it is CPU-bound for the first time.
	 */
	batch = (sil->sl_rtsum > sil->sl_slpsum);
	if (batch) {
		if ((sil->sl_flags & SL_BATCH) == 0)
			batch = false;
		sil->sl_flags |= SL_BATCH;
	} else
		sil->sl_flags &= ~SL_BATCH;
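	/*
	 * At this point "batch" is true only if the thread has been
	 * CPU-bound for at least two consecutive sampling periods.
	 */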

	/* Reset the time sums */
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/* Estimate threads on the time-sharing queue only */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;

	/* If it is CPU-bound not for the first time - decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread did not run for a second or more - set a high priority */
	if (l->l_stat == LSRUN) {
		if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, const struct cpu_info *ci)
{

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return false;

	if ((l->l_flag & LW_BOUND) == 0)
		return true;
#if 0
	return cpu_in_pset(ci, l->l_psid);
#else
	return false;
#endif
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Pick and return the CPU, if migration is needed.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci = NULL;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* The CPU of this thread is idling - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);
	sil = l->l_sched_info;

	/* Stay if the thread is cache-hot */
	if (l->l_stat == LSSLEEP && l->l_slptime <= 1 &&
	    CACHE_HOT(sil) && eprio >= spc->spc_curpriority)
		return ci;

	/* Run on the current CPU if the priority of the thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU with the lowest priority thread.  If the
	 * priorities are equal, prefer the CPU with fewer threads.
	 */
	lpri = PRI_COUNT;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri > lpri)
			continue;

		if (pri == lpri && tci && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (sched_migratable(l, ci) == false)
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}

	KASSERT(tci != NULL);
	return tci;
}

/*
 * Try to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.
	 */
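	/*
	 * Locks are always taken in ascending CPU order to avoid deadlock:
	 * if the remote runqueue sorts below ours and cannot be try-locked,
	 * drop our own lock first, re-take both in order, and give up if
	 * work appeared on our runqueue in the meantime.
	 */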
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		sched_info_lwp_t *sil;

		/* Check the first and next result from the queue */
		if (l == NULL)
			break;

		/* Look for threads that are allowed to migrate */
		sil = l->l_sched_info;
		if ((l->l_flag & LW_SYSTEM) || CACHE_HOT(sil) ||
		    sched_migratable(l, curci) == false) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if the chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}

/*
 * Periodic calculations for balancing.
 */
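/*
 * r_avgcount is an exponentially decayed average of the count of
 * migratable threads on each CPU; the CPU with the highest average
 * becomes worker_ci, the preferred victim for sched_catchlwp().
 */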
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the counting locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Average count of the threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If the runqueue is empty, try to catch some thread from another CPU */
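	/* An offline CPU does not steal threads and only proceeds if it
	 * has bound (non-migratable) LWPs queued */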
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints)
		return true;
#endif

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return (ci_rq->r_count - ci_rq->r_mcount);

	return ci_rq->r_count;
}

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus no locking is needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum, and continue running,
		 * if the thread runs on the FIFO real-time policy.
		 */
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease the
		 * priority, and run with a higher time-quantum.
		 */
		if (l->l_priority > PRI_HIGHEST_TS)
			break;
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}
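	/*
	 * SCHED_RR threads take no special action above and simply fall
	 * through to the yield check below when their time-slice expires.
	 */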

	/*
	 * If there are higher priority threads or threads in the same queue,
	 * mark that the thread should yield; otherwise, continue running.
	 */
	if (lwp_eprio(l) <= ci_rq->r_highest_pri) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}
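
/*
 * For example, the time-slice bounds can be tuned at run-time through the
 * nodes created below (values in milliseconds):
 *	sysctl -w kern.sched.maxts=150
 *	sysctl -w kern.sched.mints=50
 */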

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of the threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)(" %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif	/* defined(DDB) */