/*	$NetBSD: sched_m2.c,v 1.10.2.3 2007/11/08 11:00:04 matt Exp $	*/

/*
 * Copyright (c) 2007, Mindaugas Rasiukevicius
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TODO:
 *  - Implementation of fair share queue;
 *  - Support for NUMA;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sched_m2.c,v 1.10.2.3 2007/11/08 11:00:04 matt Exp $");

#include <sys/param.h>

#include <sys/bitops.h>
#include <sys/cpu.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/types.h>
/*
 * Priority-related definitions.
 */
#define	PRI_TS_COUNT	(NPRI_USER)
#define	PRI_RT_COUNT	(PRI_COUNT - PRI_TS_COUNT)
#define	PRI_HTS_RANGE	(PRI_TS_COUNT / 10)

#define	PRI_HIGHEST_TS	(MAXPRI_USER)
#define	PRI_DEFAULT	(NPRI_USER >> 1)

const int schedppq = 1;

/*
 * Bits per map.
 */
#define	BITMAP_BITS	(32)
#define	BITMAP_SHIFT	(5)
#define	BITMAP_MSB	(0x80000000U)
#define	BITMAP_MASK	(BITMAP_BITS - 1)
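
/*
 * Bitmap layout (illustrative): priority "eprio" lives in word
 * "eprio >> BITMAP_SHIFT" at bit "BITMAP_MSB >> (eprio & BITMAP_MASK)".
 * Within a word, higher priorities therefore occupy lower bit positions,
 * so ffs() on a word finds the highest queued priority, recovered as
 * (i << BITMAP_SHIFT) + (BITMAP_BITS - ffs(word)) in sched_dequeue().
 * For example, priority 37 maps to word 1, bit BITMAP_MSB >> 5.
 */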

/*
 * Time-slices and priorities.
 */
static u_int	min_ts;			/* Minimal time-slice */
static u_int	max_ts;			/* Maximal time-slice */
static u_int	rt_ts;			/* Real-time time-slice */
static u_int	ts_map[PRI_COUNT];	/* Map of time-slices */
static pri_t	high_pri[PRI_COUNT];	/* Map for priority increase */

/*
 * Migration and balancing.
 */
#ifdef MULTIPROCESSOR
static u_int	cacheht_time;		/* Cache hotness time */
static u_int	min_catch;		/* Minimal LWP count for catching */

static u_int	balance_period;		/* Balance period */
static struct callout balance_ch;	/* Callout of balancer */

static struct cpu_info * volatile worker_ci;

#define	CACHE_HOT(sil)	(sil->sl_lrtime && \
    (hardclock_ticks - sil->sl_lrtime < cacheht_time))
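
/*
 * That is: an LWP counts as cache-hot if it has run at least once
 * (sl_lrtime != 0) and fewer than cacheht_time ticks have passed since
 * it last left a CPU.  Cache-hot threads are kept on their current CPU
 * by sched_takecpu() and skipped by sched_catchlwp().
 */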

#endif

/*
 * Structures, runqueue.
 */

typedef struct {
	TAILQ_HEAD(, lwp) q_head;
} queue_t;

typedef struct {
	/* Lock and bitmap */
	kmutex_t	r_rq_mutex;
	uint32_t	r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
	/* Counters */
	u_int		r_count;	/* Count of the threads */
	pri_t		r_highest_pri;	/* Highest priority */
	u_int		r_avgcount;	/* Average count of threads */
	u_int		r_mcount;	/* Count of migratable threads */
	/* Runqueues */
	queue_t		r_rt_queue[PRI_RT_COUNT];
	queue_t		r_ts_queue[PRI_TS_COUNT];
} runqueue_t;

typedef struct {
	u_int		sl_flags;
	u_int		sl_timeslice;	/* Time-slice of thread */
	u_int		sl_slept;	/* Saved sleep time for sleep sum */
	u_int		sl_slpsum;	/* Sum of sleep time */
	u_int		sl_rtime;	/* Saved start time of run */
	u_int		sl_rtsum;	/* Sum of the run time */
	u_int		sl_lrtime;	/* Last run time */
} sched_info_lwp_t;

/* Flags */
#define	SL_BATCH	0x01

/* Pool of the scheduler-specific structures for threads */
static struct pool	sil_pool;

/*
 * Prototypes.
 */

static inline void *	sched_getrq(runqueue_t *, const pri_t);
static inline void	sched_newts(struct lwp *);
static void		sched_precalcts(void);

#ifdef MULTIPROCESSOR
static struct lwp *	sched_catchlwp(void);
static void		sched_balance(void *);
#endif

/*
 * Initialization and setup.
 */

void
sched_rqinit(void)
{
	struct cpu_info *ci = curcpu();

	if (hz < 100) {
		panic("sched_rqinit: value of HZ is too low\n");
	}

	/* Default timing ranges */
	min_ts = mstohz(50);			/* ~50 ms */
	max_ts = mstohz(150);			/* ~150 ms */
	rt_ts = mstohz(100);			/* ~100 ms */
	sched_precalcts();

#ifdef MULTIPROCESSOR
	/* Balancing */
	worker_ci = ci;
	cacheht_time = mstohz(5);		/* ~5 ms */
	balance_period = mstohz(300);		/* ~300 ms */
	min_catch = ~0;
#endif

	/* Pool of the scheduler-specific structures */
	pool_init(&sil_pool, sizeof(sched_info_lwp_t), 0, 0, 0,
	    "lwpsd", &pool_allocator_nointr, IPL_NONE);

	/* Attach the primary CPU here */
	sched_cpuattach(ci);

	/* Initialize the scheduler structure of the primary LWP */
	lwp0.l_mutex = &ci->ci_schedstate.spc_lwplock;
	sched_lwp_fork(NULL, &lwp0);
	sched_newts(&lwp0);
}

void
sched_setup(void)
{

#ifdef MULTIPROCESSOR
	/* Minimal count of LWPs for catching: log2(count of CPUs) */
	min_catch = min(ilog2(ncpu), 4);

	/* Initialize balancing callout and run it */
	callout_init(&balance_ch, CALLOUT_MPSAFE);
	callout_setfunc(&balance_ch, sched_balance, NULL);
	callout_schedule(&balance_ch, balance_period);
#endif
}

void
sched_cpuattach(struct cpu_info *ci)
{
	runqueue_t *ci_rq;
	void *rq_ptr;
	u_int i, size;

	/*
	 * Allocate the run queue.
	 * XXX: Estimate cache behaviour more..
	 */
	size = roundup(sizeof(runqueue_t), CACHE_LINE_SIZE) + CACHE_LINE_SIZE;
	rq_ptr = kmem_zalloc(size, KM_NOSLEEP);
	if (rq_ptr == NULL) {
		panic("scheduler: could not allocate the runqueue");
	}
	/* XXX: Save the original pointer for future.. */
	ci_rq = (void *)(roundup((intptr_t)(rq_ptr), CACHE_LINE_SIZE));

	/* Initialize run queues */
	mutex_init(&ci_rq->r_rq_mutex, MUTEX_SPIN, IPL_SCHED);
	for (i = 0; i < PRI_RT_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
	for (i = 0; i < PRI_TS_COUNT; i++)
		TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
	ci_rq->r_highest_pri = 0;

	ci->ci_schedstate.spc_sched_info = ci_rq;
	ci->ci_schedstate.spc_mutex = &ci_rq->r_rq_mutex;
}

/* Pre-calculate the time-slices for the priorities */
static void
sched_precalcts(void)
{
	pri_t p;

	/* Time-sharing range */
	for (p = 0; p <= PRI_HIGHEST_TS; p++) {
		ts_map[p] = max_ts -
		    (p * 100 / (PRI_TS_COUNT - 1) * (max_ts - min_ts) / 100);
		high_pri[p] = (PRI_HIGHEST_TS - PRI_HTS_RANGE) +
		    ((p * PRI_HTS_RANGE) / (PRI_TS_COUNT - 1));
	}

	/* Real-time range */
	for (p = (PRI_HIGHEST_TS + 1); p < PRI_COUNT; p++) {
		ts_map[p] = rt_ts;
		high_pri[p] = p;
	}
}
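
/*
 * Illustration (assuming hz == 100, so min_ts == 5 and max_ts == 15 ticks):
 * ts_map[] interpolates linearly from max_ts at priority 0 down to min_ts
 * at PRI_HIGHEST_TS, i.e. lower-priority (more CPU-bound) threads receive
 * longer slices.  high_pri[] compresses the whole time-sharing range into
 * its top tenth (PRI_HTS_RANGE), which is the boost target used by
 * sched_wakeup() and sched_pstats_hook() for interactive threads.
 */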

/*
 * Hooks.
 */

void
sched_proc_fork(struct proc *parent, struct proc *child)
{
	struct lwp *l;

	LIST_FOREACH(l, &child->p_lwps, l_sibling) {
		lwp_lock(l);
		sched_newts(l);
		lwp_unlock(l);
	}
}

void
sched_proc_exit(struct proc *child, struct proc *parent)
{

	/* Dummy */
}

void
sched_lwp_fork(struct lwp *l1, struct lwp *l2)
{

	KASSERT(l2->l_sched_info == NULL);
	l2->l_sched_info = pool_get(&sil_pool, PR_WAITOK);
	memset(l2->l_sched_info, 0, sizeof(sched_info_lwp_t));
	if (l2->l_priority <= PRI_HIGHEST_TS)	/* XXX: For now only.. */
		l2->l_priority = PRI_DEFAULT;
}

void
sched_lwp_exit(struct lwp *l)
{

	KASSERT(l->l_sched_info != NULL);
	pool_put(&sil_pool, l->l_sched_info);
	l->l_sched_info = NULL;
}

void
sched_lwp_collect(struct lwp *l)
{

}

void
sched_setrunnable(struct lwp *l)
{

	/* Dummy */
}

void
sched_schedclock(struct lwp *l)
{

	/* Dummy */
}

/*
 * Priorities and time-slice.
 */

void
sched_nice(struct proc *p, int prio)
{
	int nprio;
	struct lwp *l;

	KASSERT(mutex_owned(&p->p_smutex));

	p->p_nice = prio;
	nprio = max(min(PRI_DEFAULT + p->p_nice, PRI_HIGHEST_TS), 0);

	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
		lwp_lock(l);
		lwp_changepri(l, nprio);
		lwp_unlock(l);
	}
}

/* Recalculate the time-slice */
static inline void
sched_newts(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	sil->sl_timeslice = ts_map[lwp_eprio(l)];
}

/*
 * Control of the runqueue.
 */

static inline void *
sched_getrq(runqueue_t *ci_rq, const pri_t prio)
{

	KASSERT(prio < PRI_COUNT);
	return (prio <= PRI_HIGHEST_TS) ?
	    &ci_rq->r_ts_queue[prio].q_head :
	    &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
}

void
sched_enqueue(struct lwp *l, bool swtch)
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil = l->l_sched_info;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	/* Update the last run time on switch */
	if (__predict_true(swtch == true)) {
		sil->sl_lrtime = hardclock_ticks;
		sil->sl_rtsum += (hardclock_ticks - sil->sl_rtime);
	} else if (sil->sl_lrtime == 0)
		sil->sl_lrtime = hardclock_ticks;

	/* Enqueue the thread */
	q_head = sched_getrq(ci_rq, eprio);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Mark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) == 0);
		ci_rq->r_bitmap[i] |= q;
	}
	TAILQ_INSERT_TAIL(q_head, l, l_runq);
	ci_rq->r_count++;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount++;

	/*
	 * Update the value of highest priority in the runqueue,
	 * if priority of this thread is higher.
	 */
	if (eprio > ci_rq->r_highest_pri)
		ci_rq->r_highest_pri = eprio;

	sched_newts(l);
}

void
sched_dequeue(struct lwp *l)
{
	runqueue_t *ci_rq;
	TAILQ_HEAD(, lwp) *q_head;
	const pri_t eprio = lwp_eprio(l);

	ci_rq = l->l_cpu->ci_schedstate.spc_sched_info;
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
	KASSERT(eprio <= ci_rq->r_highest_pri);
	KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
	KASSERT(ci_rq->r_count > 0);

	ci_rq->r_count--;
	if ((l->l_flag & LW_BOUND) == 0)
		ci_rq->r_mcount--;

	q_head = sched_getrq(ci_rq, eprio);
	TAILQ_REMOVE(q_head, l, l_runq);
	if (TAILQ_EMPTY(q_head)) {
		u_int i;
		uint32_t q;

		/* Unmark bit */
		i = eprio >> BITMAP_SHIFT;
		q = BITMAP_MSB >> (eprio & BITMAP_MASK);
		KASSERT((ci_rq->r_bitmap[i] & q) != 0);
		ci_rq->r_bitmap[i] &= ~q;

		/*
		 * Update the value of highest priority in the runqueue,
		 * in case it was the last thread in the queue of the
		 * highest priority.
		 */
		if (eprio != ci_rq->r_highest_pri)
			return;

		do {
			q = ffs(ci_rq->r_bitmap[i]);
			if (q) {
				ci_rq->r_highest_pri =
				    (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
				return;
			}
		} while (i--);

		/* If not found - set the lowest value */
		ci_rq->r_highest_pri = 0;
	}
}

void
sched_slept(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Save the time when the thread went to sleep */
	sil->sl_slept = hardclock_ticks;

	/*
	 * If the thread is in the time-sharing queue and the batch flag
	 * is not set, increase the priority, and run with the lower
	 * time-quantum.
	 */
	if (l->l_priority < PRI_HIGHEST_TS && (sil->sl_flags & SL_BATCH) == 0) {
		KASSERT(l->l_class == SCHED_OTHER);
		l->l_priority++;
	}
}

void
sched_wakeup(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;

	/* Update sleep time delta */
	sil->sl_slpsum += (l->l_slptime == 0) ?
	    (hardclock_ticks - sil->sl_slept) : hz;

	/* If the thread slept for a second or more, set a high priority */
	if (l->l_slptime > 1 || (hardclock_ticks - sil->sl_slept) >= hz)
		l->l_priority = high_pri[l->l_priority];

	/* Also, consider looking for a better CPU to wake up on */
	if ((l->l_flag & (LW_BOUND | LW_SYSTEM)) == 0)
		l->l_cpu = sched_takecpu(l);
}
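
/*
 * Note: sl_slpsum and sl_rtsum are reset by sched_pstats_hook() roughly
 * once per second, so the sums cover only the current period.  If the
 * thread slept across one or more periods (l_slptime != 0), the sleep
 * credit is capped at hz ticks, i.e. at most one full period.
 */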

void
sched_pstats_hook(struct lwp *l)
{
	sched_info_lwp_t *sil = l->l_sched_info;
	pri_t prio;
	bool batch;

	if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
	    l->l_stat == LSSUSPENDED)
		l->l_slptime++;

	/*
	 * Mark the thread as CPU-bound ("batch") if its run-time sum
	 * exceeds its sleep-time sum.  The first time this is detected,
	 * only the flag is set and no priority penalty is applied.
	 */
	batch = (sil->sl_rtsum > sil->sl_slpsum);
	if (batch) {
		if ((sil->sl_flags & SL_BATCH) == 0)
			batch = false;
		sil->sl_flags |= SL_BATCH;
	} else
		sil->sl_flags &= ~SL_BATCH;

	/* Reset the time sums */
	sil->sl_slpsum = 0;
	sil->sl_rtsum = 0;

	/* Estimate threads on the time-sharing queue only */
	if (l->l_priority >= PRI_HIGHEST_TS)
		return;

	/* If it is CPU-bound, and not for the first time, decrease the priority */
	prio = l->l_priority;
	if (batch && prio != 0)
		prio--;

	/* If the thread has not run for a second or more, set a high priority */
	if (l->l_stat == LSRUN) {
		if (sil->sl_lrtime && (hardclock_ticks - sil->sl_lrtime >= hz))
			prio = high_pri[prio];
		/* Re-enqueue the thread if priority has changed */
		if (prio != l->l_priority)
			lwp_changepri(l, prio);
	} else {
		/* In other states, change the priority directly */
		l->l_priority = prio;
	}
}
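
/*
 * For example (illustrative): an LWP that computes continuously gets
 * SL_BATCH set on its first pass through this hook with no demotion;
 * on each subsequent pass, while it remains CPU-bound, its priority
 * drops one step, until it reaches 0 and runs with the longest
 * time-slice.
 */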

/*
 * Migration and balancing.
 */

#ifdef MULTIPROCESSOR

/* Check if LWP can migrate to the chosen CPU */
static inline bool
sched_migratable(const struct lwp *l, const struct cpu_info *ci)
{

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return false;

	if ((l->l_flag & LW_BOUND) == 0)
		return true;
#if 0
	return cpu_in_pset(ci, l->l_psid);
#else
	return false;
#endif
}

/*
 * Estimate whether the LWP should migrate to another CPU.
 * Take and return the chosen CPU.
 */
struct cpu_info *
sched_takecpu(struct lwp *l)
{
	struct cpu_info *ci, *tci = NULL;
	struct schedstate_percpu *spc;
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	CPU_INFO_ITERATOR cii;
	pri_t eprio, lpri;

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	ci_rq = spc->spc_sched_info;

	/* The CPU of this thread is idling - run there */
	if (ci_rq->r_count == 0)
		return ci;

	eprio = lwp_eprio(l);
	sil = l->l_sched_info;

	/* Stay if the thread is cache-hot */
	if (l->l_stat == LSSLEEP && l->l_slptime <= 1 &&
	    CACHE_HOT(sil) && eprio >= spc->spc_curpriority)
		return ci;

	/* Run on the current CPU if the priority of the thread is higher */
	ci = curcpu();
	spc = &ci->ci_schedstate;
	if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
		return ci;

	/*
	 * Look for the CPU with the lowest priority thread.  If the
	 * priorities are equal, prefer the CPU with fewer threads.
	 */
	lpri = PRI_COUNT;
	for (CPU_INFO_FOREACH(cii, ci)) {
		runqueue_t *ici_rq;
		pri_t pri;

		spc = &ci->ci_schedstate;
		ici_rq = spc->spc_sched_info;
		pri = max(spc->spc_curpriority, ici_rq->r_highest_pri);
		if (pri > lpri)
			continue;

		if (pri == lpri && tci && ci_rq->r_count < ici_rq->r_count)
			continue;

		if (sched_migratable(l, ci) == false)
			continue;

		lpri = pri;
		tci = ci;
		ci_rq = ici_rq;
	}

	KASSERT(tci != NULL);
	return tci;
}
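
/*
 * In short, sched_takecpu() tries, in order: the LWP's own CPU if it
 * is idle; its own CPU if the LWP is still cache-hot and would not lose
 * to the currently running priority; the current CPU if the LWP would
 * preempt it; and finally a scan for the CPU whose effective priority
 * (running thread vs. highest queued) is lowest, with ties broken by
 * the smaller runqueue.
 */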

/*
 * Try to catch an LWP from the runqueue of another CPU.
 */
static struct lwp *
sched_catchlwp(void)
{
	struct cpu_info *curci = curcpu(), *ci = worker_ci;
	TAILQ_HEAD(, lwp) *q_head;
	runqueue_t *ci_rq;
	struct lwp *l;

	if (curci == ci)
		return NULL;

	/* Lockless check */
	ci_rq = ci->ci_schedstate.spc_sched_info;
	if (ci_rq->r_count < min_catch)
		return NULL;

	/*
	 * Double-lock the runqueues.
	 */
	if (curci < ci) {
		spc_lock(ci);
	} else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
		const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;

		spc_unlock(curci);
		spc_lock(ci);
		spc_lock(curci);

		if (cur_rq->r_count) {
			spc_unlock(ci);
			return NULL;
		}
	}

	if (ci_rq->r_count < min_catch) {
		spc_unlock(ci);
		return NULL;
	}

	/* Take the highest priority thread */
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);

	for (;;) {
		sched_info_lwp_t *sil;

		/* Check the first, then each next, entry in the queue */
		if (l == NULL)
			break;

		/* Look for threads that are allowed to migrate */
		sil = l->l_sched_info;
		if ((l->l_flag & LW_SYSTEM) || CACHE_HOT(sil) ||
		    sched_migratable(l, curci) == false) {
			l = TAILQ_NEXT(l, l_runq);
			continue;
		}
		/* Recheck if the chosen thread is still on the runqueue */
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM)) {
			sched_dequeue(l);
			l->l_cpu = curci;
			lwp_setlock(l, curci->ci_schedstate.spc_mutex);
			sched_enqueue(l, false);
			break;
		}
		l = TAILQ_NEXT(l, l_runq);
	}
	spc_unlock(ci);

	return l;
}
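
/*
 * Note on the double-lock above: runqueue mutexes are taken in
 * ascending order of the CPU pointers to avoid deadlock.  If the remote
 * CPU orders first, its lock cannot be taken directly while ours is
 * held, so on a failed tryenter the local lock is dropped and both are
 * re-taken in order; if local work appeared in the meantime, the steal
 * is abandoned.
 */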

/*
 * Periodic calculations for balancing.
 */
static void
sched_balance(void *nocallout)
{
	struct cpu_info *ci, *hci;
	runqueue_t *ci_rq;
	CPU_INFO_ITERATOR cii;
	u_int highest;

	hci = curcpu();
	highest = 0;

	/* Do the counting locklessly */
	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		/* Average count of the threads */
		ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;

		/* Look for the CPU with the highest average */
		if (ci_rq->r_avgcount > highest) {
			hci = ci;
			highest = ci_rq->r_avgcount;
		}
	}

	/* Update the worker */
	worker_ci = hci;

	if (nocallout == NULL)
		callout_schedule(&balance_ch, balance_period);
}
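
/*
 * The average is an exponentially weighted moving average of the count
 * of migratable threads, with a weight of 1/2 per balance period.  The
 * busiest CPU found becomes worker_ci, from which idle CPUs will try
 * to steal work via sched_catchlwp().
 */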

#else

struct cpu_info *
sched_takecpu(struct lwp *l)
{

	return l->l_cpu;
}

#endif	/* MULTIPROCESSOR */

/*
 * Scheduler mill.
 */
struct lwp *
sched_nextlwp(void)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	TAILQ_HEAD(, lwp) *q_head;
	sched_info_lwp_t *sil;
	runqueue_t *ci_rq;
	struct lwp *l;

	spc = &ci->ci_schedstate;
	ci_rq = ci->ci_schedstate.spc_sched_info;

#ifdef MULTIPROCESSOR
	/* If runqueue is empty, try to catch some thread from other CPU */
	if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
		if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
			return NULL;
	} else if (ci_rq->r_count == 0) {
		/* Reset the counter, and call the balancer */
		ci_rq->r_avgcount = 0;
		sched_balance(ci);

		/* The re-locking will be done inside */
		return sched_catchlwp();
	}
#else
	if (ci_rq->r_count == 0)
		return NULL;
#endif

	/* Take the highest priority thread */
	KASSERT(ci_rq->r_bitmap[ci_rq->r_highest_pri >> BITMAP_SHIFT]);
	q_head = sched_getrq(ci_rq, ci_rq->r_highest_pri);
	l = TAILQ_FIRST(q_head);
	KASSERT(l != NULL);

	/* Update the counters */
	sil = l->l_sched_info;
	KASSERT(sil->sl_timeslice >= min_ts);
	KASSERT(sil->sl_timeslice <= max_ts);
	spc->spc_ticks = sil->sl_timeslice;
	sil->sl_rtime = hardclock_ticks;

	return l;
}

bool
sched_curcpu_runnable_p(void)
{
	const struct cpu_info *ci = curcpu();
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;

#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints)
		return true;
#endif

	if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
		return (ci_rq->r_count - ci_rq->r_mcount);

	return ci_rq->r_count;
}
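
/*
 * Note: since r_mcount counts migratable threads, the difference
 * r_count - r_mcount is the number of bound threads, which are the
 * only ones an offline CPU is still obliged to run.
 */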

/*
 * Time-driven events.
 */

/*
 * Called once per time-quantum.  This routine is CPU-local and runs at
 * IPL_SCHED, thus the locking is not needed.
 */
void
sched_tick(struct cpu_info *ci)
{
	const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct lwp *l = curlwp;
	sched_info_lwp_t *sil = l->l_sched_info;

	if (CURCPU_IDLE_P())
		return;

	switch (l->l_class) {
	case SCHED_FIFO:
		/*
		 * Update the time-quantum, and continue running,
		 * if the thread runs under the FIFO real-time policy.
		 */
		spc->spc_ticks = sil->sl_timeslice;
		return;
	case SCHED_OTHER:
		/*
		 * If the thread is in the time-sharing queue, decrease
		 * the priority, and run with a higher time-quantum.
		 */
		if (l->l_priority > PRI_HIGHEST_TS)
			break;
		if (l->l_priority != 0)
			l->l_priority--;
		break;
	}

	/*
	 * If there are higher priority threads, or threads in the same
	 * queue, mark that the thread should yield; otherwise, continue
	 * running.
	 */
	if (lwp_eprio(l) <= ci_rq->r_highest_pri) {
		spc->spc_flags |= SPCF_SHOULDYIELD;
		cpu_need_resched(ci, 0);
	} else
		spc->spc_ticks = sil->sl_timeslice;
}
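
/*
 * Combined with sched_precalcts(), this gives SCHED_OTHER threads the
 * classic feedback behaviour: each expired quantum moves a CPU-bound
 * thread one priority step down, where ts_map[] grants it a longer
 * slice, while sleeps (sched_slept/sched_wakeup) move it back up to
 * run with shorter slices.
 */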

/*
 * Sysctl nodes and initialization.
 */

static int
sysctl_sched_mints(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(min_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 1 || newsize > hz || newsize >= max_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	min_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

static int
sysctl_sched_maxts(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct cpu_info *ci;
	int error, newsize;
	CPU_INFO_ITERATOR cii;

	node = *rnode;
	node.sysctl_data = &newsize;

	newsize = hztoms(max_ts);
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	newsize = mstohz(newsize);
	if (newsize < 10 || newsize > hz || newsize <= min_ts)
		return EINVAL;

	/* It is safe to do this in this order */
	for (CPU_INFO_FOREACH(cii, ci))
		spc_lock(ci);

	max_ts = newsize;
	sched_precalcts();

	for (CPU_INFO_FOREACH(cii, ci))
		spc_unlock(ci);

	return 0;
}

SYSCTL_SETUP(sysctl_sched_setup, "sysctl kern.sched subtree setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "kern", NULL,
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, &node,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "sched",
		SYSCTL_DESCR("Scheduler options"),
		NULL, 0, NULL, 0,
		CTL_KERN, CTL_CREATE, CTL_EOL);

	if (node == NULL)
		return;

	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "name", NULL,
		NULL, 0, __UNCONST("M2"), 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxts",
		SYSCTL_DESCR("Maximal time quantum (in milliseconds)"),
		sysctl_sched_maxts, 0, &max_ts, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "mints",
		SYSCTL_DESCR("Minimal time quantum (in milliseconds)"),
		sysctl_sched_mints, 0, &min_ts, 0,
		CTL_CREATE, CTL_EOL);

#ifdef MULTIPROCESSOR
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "cacheht_time",
		SYSCTL_DESCR("Cache hotness time (in ticks)"),
		NULL, 0, &cacheht_time, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "balance_period",
		SYSCTL_DESCR("Balance period (in ticks)"),
		NULL, 0, &balance_period, 0,
		CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
		CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
		CTLTYPE_INT, "min_catch",
		SYSCTL_DESCR("Minimal count of threads for catching"),
		NULL, 0, &min_catch, 0,
		CTL_CREATE, CTL_EOL);
#endif
}

/*
 * Debugging.
 */

#ifdef DDB

void
sched_print_runqueue(void (*pr)(const char *, ...))
{
	runqueue_t *ci_rq;
	sched_info_lwp_t *sil;
	struct lwp *l;
	struct proc *p;
	int i;

	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		ci_rq = ci->ci_schedstate.spc_sched_info;

		(*pr)("Run-queue (CPU = %d):\n", ci->ci_cpuid);
		(*pr)(" pid.lid = %d.%d, threads count = %u, "
		    "avgcount = %u, highest pri = %d\n",
		    ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
		    ci_rq->r_count, ci_rq->r_avgcount, ci_rq->r_highest_pri);
		i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
		do {
			uint32_t q;
			q = ci_rq->r_bitmap[i];
			(*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
		} while (i--);
	}

	(*pr)("   %5s %4s %4s %10s %3s %4s %11s %3s %s\n",
	    "LID", "PRI", "EPRI", "FL", "ST", "TS", "LWP", "CPU", "LRTIME");

	PROCLIST_FOREACH(p, &allproc) {
		(*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			sil = l->l_sched_info;
			ci = l->l_cpu;
			(*pr)(" | %5d %4u %4u 0x%8.8x %3s %4u %11p %3d "
			    "%u ST=%d RT=%d %d\n",
			    (int)l->l_lid, l->l_priority, lwp_eprio(l),
			    l->l_flag, l->l_stat == LSRUN ? "RQ" :
			    (l->l_stat == LSSLEEP ? "SQ" : "-"),
			    sil->sl_timeslice, l, ci->ci_cpuid,
			    (u_int)(hardclock_ticks - sil->sl_lrtime),
			    sil->sl_slpsum, sil->sl_rtsum, sil->sl_flags);
		}
	}
}

#endif	/* defined(DDB) */