1 1.226 yamt /* $NetBSD: kern_synch.c,v 1.226 2008/04/13 22:53:31 yamt Exp $ */
2 1.63 thorpej
3 1.63 thorpej /*-
4 1.218 ad * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 1.63 thorpej * All rights reserved.
6 1.63 thorpej *
7 1.63 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.63 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.188 yamt * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
10 1.188 yamt * Daniel Sieger.
11 1.63 thorpej *
12 1.63 thorpej * Redistribution and use in source and binary forms, with or without
13 1.63 thorpej * modification, are permitted provided that the following conditions
14 1.63 thorpej * are met:
15 1.63 thorpej * 1. Redistributions of source code must retain the above copyright
16 1.63 thorpej * notice, this list of conditions and the following disclaimer.
17 1.63 thorpej * 2. Redistributions in binary form must reproduce the above copyright
18 1.63 thorpej * notice, this list of conditions and the following disclaimer in the
19 1.63 thorpej * documentation and/or other materials provided with the distribution.
20 1.63 thorpej * 3. All advertising materials mentioning features or use of this software
21 1.63 thorpej * must display the following acknowledgement:
22 1.63 thorpej * This product includes software developed by the NetBSD
23 1.63 thorpej * Foundation, Inc. and its contributors.
24 1.63 thorpej * 4. Neither the name of The NetBSD Foundation nor the names of its
25 1.63 thorpej * contributors may be used to endorse or promote products derived
26 1.63 thorpej * from this software without specific prior written permission.
27 1.63 thorpej *
28 1.63 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29 1.63 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30 1.63 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31 1.63 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32 1.63 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 1.63 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 1.63 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 1.63 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 1.63 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 1.63 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 1.63 thorpej * POSSIBILITY OF SUCH DAMAGE.
39 1.63 thorpej */
40 1.26 cgd
41 1.223 ad /*
42 1.223 ad * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
43 1.223 ad * All rights reserved.
44 1.223 ad *
45 1.223 ad * Redistribution and use in source and binary forms, with or without
46 1.223 ad * modification, are permitted provided that the following conditions
47 1.223 ad * are met:
48 1.223 ad * 1. Redistributions of source code must retain the above copyright
49 1.223 ad * notice, this list of conditions and the following disclaimer.
50 1.223 ad * 2. Redistributions in binary form must reproduce the above copyright
51 1.223 ad * notice, this list of conditions and the following disclaimer in the
52 1.223 ad * documentation and/or other materials provided with the distribution.
53 1.223 ad *
54 1.223 ad * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
55 1.223 ad * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 1.223 ad * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 1.223 ad * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
58 1.223 ad * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 1.223 ad * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 1.223 ad * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 1.223 ad * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 1.223 ad * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 1.223 ad * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 1.223 ad * SUCH DAMAGE.
65 1.223 ad */
66 1.223 ad
67 1.26 cgd /*-
68 1.26 cgd * Copyright (c) 1982, 1986, 1990, 1991, 1993
69 1.26 cgd * The Regents of the University of California. All rights reserved.
70 1.26 cgd * (c) UNIX System Laboratories, Inc.
71 1.26 cgd * All or some portions of this file are derived from material licensed
72 1.26 cgd * to the University of California by American Telephone and Telegraph
73 1.26 cgd * Co. or Unix System Laboratories, Inc. and are reproduced herein with
74 1.26 cgd * the permission of UNIX System Laboratories, Inc.
75 1.26 cgd *
76 1.26 cgd * Redistribution and use in source and binary forms, with or without
77 1.26 cgd * modification, are permitted provided that the following conditions
78 1.26 cgd * are met:
79 1.26 cgd * 1. Redistributions of source code must retain the above copyright
80 1.26 cgd * notice, this list of conditions and the following disclaimer.
81 1.26 cgd * 2. Redistributions in binary form must reproduce the above copyright
82 1.26 cgd * notice, this list of conditions and the following disclaimer in the
83 1.26 cgd * documentation and/or other materials provided with the distribution.
84 1.136 agc * 3. Neither the name of the University nor the names of its contributors
85 1.26 cgd * may be used to endorse or promote products derived from this software
86 1.26 cgd * without specific prior written permission.
87 1.26 cgd *
88 1.26 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
89 1.26 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
90 1.26 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
91 1.26 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
92 1.26 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
93 1.26 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
94 1.26 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
95 1.26 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
96 1.26 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
97 1.26 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
98 1.26 cgd * SUCH DAMAGE.
99 1.26 cgd *
100 1.50 fvdl * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
101 1.26 cgd */
102 1.106 lukem
103 1.106 lukem #include <sys/cdefs.h>
104 1.226 yamt __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.226 2008/04/13 22:53:31 yamt Exp $");
105 1.48 mrg
106 1.109 yamt #include "opt_kstack.h"
107 1.82 thorpej #include "opt_lockdebug.h"
108 1.83 thorpej #include "opt_multiprocessor.h"
109 1.110 briggs #include "opt_perfctrs.h"
110 1.26 cgd
111 1.174 ad #define __MUTEX_PRIVATE
112 1.174 ad
113 1.26 cgd #include <sys/param.h>
114 1.26 cgd #include <sys/systm.h>
115 1.26 cgd #include <sys/proc.h>
116 1.26 cgd #include <sys/kernel.h>
117 1.111 briggs #if defined(PERFCTRS)
118 1.110 briggs #include <sys/pmc.h>
119 1.111 briggs #endif
120 1.188 yamt #include <sys/cpu.h>
121 1.26 cgd #include <sys/resourcevar.h>
122 1.55 ross #include <sys/sched.h>
123 1.179 dsl #include <sys/syscall_stats.h>
124 1.174 ad #include <sys/sleepq.h>
125 1.174 ad #include <sys/lockdebug.h>
126 1.190 ad #include <sys/evcnt.h>
127 1.199 ad #include <sys/intr.h>
128 1.207 ad #include <sys/lwpctl.h>
129 1.209 ad #include <sys/atomic.h>
130 1.215 ad #include <sys/simplelock.h>
131 1.223 ad #include <sys/bitops.h>
132 1.223 ad #include <sys/kmem.h>
133 1.223 ad #include <sys/sysctl.h>
134 1.223 ad #include <sys/idle.h>
135 1.47 mrg
136 1.47 mrg #include <uvm/uvm_extern.h>
137 1.47 mrg
138 1.223 ad /*
139 1.223 ad  * Priority-related definitions.
140 1.223 ad */
141 1.223 ad #define PRI_TS_COUNT (NPRI_USER)
142 1.223 ad #define PRI_RT_COUNT (PRI_COUNT - PRI_TS_COUNT)
143 1.223 ad #define PRI_HTS_RANGE (PRI_TS_COUNT / 10)
144 1.223 ad
145 1.223 ad #define PRI_HIGHEST_TS (MAXPRI_USER)
146 1.223 ad
147 1.223 ad /*
148 1.223 ad * Bits per map.
149 1.223 ad */
150 1.223 ad #define BITMAP_BITS (32)
151 1.223 ad #define BITMAP_SHIFT (5)
152 1.223 ad #define BITMAP_MSB (0x80000000U)
153 1.223 ad #define BITMAP_MASK (BITMAP_BITS - 1)
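/*
 * Each word of r_bitmap tracks 32 priority levels; a set bit means the
 * corresponding run queue is non-empty.  Priority p maps to word
 * (p >> BITMAP_SHIFT) and mask (BITMAP_MSB >> (p & BITMAP_MASK)), so
 * within a word higher priorities sit closer to the LSB and ffs() on a
 * word recovers the highest occupied priority: e.g. priority 50 uses
 * word 1 and mask 0x00002000, and (1 << 5) + (32 - ffs(0x2000)) == 50.
 */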
154 1.223 ad
155 1.223 ad /*
156 1.223 ad * Structures, runqueue.
157 1.223 ad */
158 1.34 christos
159 1.223 ad typedef struct {
160 1.223 ad TAILQ_HEAD(, lwp) q_head;
161 1.223 ad } queue_t;
162 1.223 ad
163 1.223 ad typedef struct {
164 1.223 ad /* Lock and bitmap */
165 1.223 ad uint32_t r_bitmap[PRI_COUNT >> BITMAP_SHIFT];
166 1.223 ad /* Counters */
167 1.223 ad u_int r_count; /* Count of the threads */
168 1.223 ad u_int r_avgcount; /* Average count of threads */
169 1.223 ad u_int r_mcount; /* Count of migratable threads */
170 1.223 ad /* Runqueues */
171 1.223 ad queue_t r_rt_queue[PRI_RT_COUNT];
172 1.223 ad queue_t r_ts_queue[PRI_TS_COUNT];
173 1.223 ad } runqueue_t;
174 1.26 cgd
175 1.221 ad static u_int sched_unsleep(struct lwp *, bool);
176 1.188 yamt static void sched_changepri(struct lwp *, pri_t);
177 1.188 yamt static void sched_lendpri(struct lwp *, pri_t);
178 1.223 ad static void *sched_getrq(runqueue_t *, const pri_t);
179 1.223 ad #ifdef MULTIPROCESSOR
180 1.223 ad static lwp_t *sched_catchlwp(void);
181 1.223 ad static void sched_balance(void *);
182 1.223 ad #endif
183 1.122 thorpej
184 1.174 ad syncobj_t sleep_syncobj = {
185 1.174 ad SOBJ_SLEEPQ_SORTED,
186 1.174 ad sleepq_unsleep,
187 1.184 yamt sleepq_changepri,
188 1.184 yamt sleepq_lendpri,
189 1.184 yamt syncobj_noowner,
190 1.174 ad };
191 1.174 ad
192 1.174 ad syncobj_t sched_syncobj = {
193 1.174 ad SOBJ_SLEEPQ_SORTED,
194 1.174 ad sched_unsleep,
195 1.184 yamt sched_changepri,
196 1.184 yamt sched_lendpri,
197 1.184 yamt syncobj_noowner,
198 1.174 ad };
199 1.122 thorpej
200 1.223 ad const int schedppq = 1;
201 1.223 ad callout_t sched_pstats_ch;
202 1.223 ad unsigned sched_pstats_ticks;
203 1.223 ad kcondvar_t lbolt; /* once a second sleep address */
204 1.223 ad
205 1.223 ad /*
206 1.223 ad * Migration and balancing.
207 1.223 ad */
208 1.223 ad static u_int cacheht_time; /* Cache hotness time */
209 1.223 ad static u_int min_catch; /* Minimal LWP count for catching */
210 1.223 ad static u_int balance_period; /* Balance period */
211 1.223 ad static struct cpu_info *worker_ci; /* Victim CPU */
212 1.223 ad #ifdef MULTIPROCESSOR
213 1.223 ad static struct callout balance_ch; /* Callout of balancer */
214 1.223 ad #endif
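/*
 * The balancer runs every balance_period ticks: it records the CPU with
 * the highest average run queue length in worker_ci, and a CPU that finds
 * its own run queue empty then tries to steal an LWP from that victim via
 * sched_catchlwp().  min_catch sets the minimum number of migratable LWPs
 * a victim must have before stealing is attempted.
 */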
215 1.223 ad
216 1.26 cgd /*
217 1.174 ad * During autoconfiguration or after a panic, a sleep will simply lower the
218 1.174 ad * priority briefly to allow interrupts, then return. The priority to be
219 1.174 ad * used (safepri) is machine-dependent, thus this value is initialized and
220 1.174 ad * maintained in the machine-dependent layers. This priority will typically
221 1.174 ad * be 0, or the lowest priority that is safe for use on the interrupt stack;
222 1.174 ad * it can be made higher to block network software interrupts after panics.
223 1.26 cgd */
224 1.174 ad int safepri;
225 1.26 cgd
226 1.26 cgd /*
227 1.174 ad * OBSOLETE INTERFACE
228 1.174 ad *
229 1.26 cgd * General sleep call. Suspends the current process until a wakeup is
230 1.26 cgd * performed on the specified identifier. The process will then be made
231 1.174 ad * runnable with the specified priority. Sleeps at most timo/hz seconds (0
232 1.174 ad * means no timeout). If pri includes PCATCH flag, signals are checked
233 1.26 cgd * before and after sleeping, else signals are not checked. Returns 0 if
234 1.26 cgd * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
235 1.26 cgd * signal needs to be delivered, ERESTART is returned if the current system
236 1.26 cgd * call should be restarted if possible, and EINTR is returned if the system
237 1.26 cgd * call should be interrupted by the signal (return EINTR).
238 1.77 thorpej *
239 1.174 ad * The interlock is held until we are on a sleep queue. The interlock will
240 1.174 ad * be locked before returning back to the caller unless the PNORELOCK flag
241 1.174 ad * is specified, in which case the interlock will always be unlocked upon
242 1.174 ad * return.
243 1.26 cgd */
244 1.26 cgd int
245 1.185 yamt ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
246 1.174 ad volatile struct simplelock *interlock)
247 1.26 cgd {
248 1.122 thorpej struct lwp *l = curlwp;
249 1.174 ad sleepq_t *sq;
250 1.188 yamt int error;
251 1.26 cgd
252 1.204 ad KASSERT((l->l_pflag & LP_INTR) == 0);
253 1.204 ad
254 1.174 ad if (sleepq_dontsleep(l)) {
255 1.174 ad (void)sleepq_abort(NULL, 0);
256 1.174 ad if ((priority & PNORELOCK) != 0)
257 1.77 thorpej simple_unlock(interlock);
258 1.174 ad return 0;
259 1.26 cgd }
260 1.78 sommerfe
261 1.204 ad l->l_kpriority = true;
262 1.174 ad sq = sleeptab_lookup(&sleeptab, ident);
263 1.174 ad sleepq_enter(sq, l);
264 1.204 ad sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
265 1.42 cgd
266 1.174 ad if (interlock != NULL) {
267 1.204 ad KASSERT(simple_lock_held(interlock));
268 1.174 ad simple_unlock(interlock);
269 1.150 chs }
270 1.150 chs
271 1.188 yamt error = sleepq_block(timo, priority & PCATCH);
272 1.126 pk
273 1.174 ad if (interlock != NULL && (priority & PNORELOCK) == 0)
274 1.126 pk simple_lock(interlock);
275 1.174 ad
276 1.174 ad return error;
277 1.26 cgd }
278 1.26 cgd
279 1.187 ad int
280 1.187 ad mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
281 1.187 ad kmutex_t *mtx)
282 1.187 ad {
283 1.187 ad struct lwp *l = curlwp;
284 1.187 ad sleepq_t *sq;
285 1.188 yamt int error;
286 1.187 ad
287 1.204 ad KASSERT((l->l_pflag & LP_INTR) == 0);
288 1.204 ad
289 1.187 ad if (sleepq_dontsleep(l)) {
290 1.187 ad (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
291 1.187 ad return 0;
292 1.187 ad }
293 1.187 ad
294 1.204 ad l->l_kpriority = true;
295 1.187 ad sq = sleeptab_lookup(&sleeptab, ident);
296 1.187 ad sleepq_enter(sq, l);
297 1.204 ad sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
298 1.187 ad mutex_exit(mtx);
299 1.188 yamt error = sleepq_block(timo, priority & PCATCH);
300 1.187 ad
301 1.187 ad if ((priority & PNORELOCK) == 0)
302 1.187 ad mutex_enter(mtx);
303 1.187 ad
304 1.187 ad return error;
305 1.187 ad }
306 1.187 ad
307 1.26 cgd /*
308 1.174 ad * General sleep call for situations where a wake-up is not expected.
309 1.26 cgd */
310 1.174 ad int
311 1.182 thorpej kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
312 1.26 cgd {
313 1.174 ad struct lwp *l = curlwp;
314 1.174 ad sleepq_t *sq;
315 1.174 ad int error;
316 1.26 cgd
317 1.174 ad if (sleepq_dontsleep(l))
318 1.174 ad return sleepq_abort(NULL, 0);
319 1.26 cgd
320 1.174 ad if (mtx != NULL)
321 1.174 ad mutex_exit(mtx);
322 1.204 ad l->l_kpriority = true;
323 1.174 ad sq = sleeptab_lookup(&sleeptab, l);
324 1.174 ad sleepq_enter(sq, l);
325 1.204 ad sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
326 1.188 yamt error = sleepq_block(timo, intr);
327 1.174 ad if (mtx != NULL)
328 1.174 ad mutex_enter(mtx);
329 1.83 thorpej
330 1.174 ad return error;
331 1.139 cl }
332 1.139 cl
333 1.26 cgd /*
334 1.174 ad * OBSOLETE INTERFACE
335 1.174 ad *
336 1.26 cgd * Make all processes sleeping on the specified identifier runnable.
337 1.26 cgd */
338 1.26 cgd void
339 1.174 ad wakeup(wchan_t ident)
340 1.26 cgd {
341 1.174 ad sleepq_t *sq;
342 1.83 thorpej
343 1.174 ad if (cold)
344 1.174 ad return;
345 1.83 thorpej
346 1.174 ad sq = sleeptab_lookup(&sleeptab, ident);
347 1.174 ad sleepq_wake(sq, ident, (u_int)-1);
348 1.63 thorpej }
349 1.63 thorpej
350 1.63 thorpej /*
351 1.174 ad * OBSOLETE INTERFACE
352 1.174 ad *
353 1.63 thorpej * Make the highest priority process first in line on the specified
354 1.63 thorpej * identifier runnable.
355 1.63 thorpej */
356 1.174 ad void
357 1.174 ad wakeup_one(wchan_t ident)
358 1.63 thorpej {
359 1.174 ad sleepq_t *sq;
360 1.63 thorpej
361 1.174 ad if (cold)
362 1.174 ad return;
363 1.188 yamt
364 1.174 ad sq = sleeptab_lookup(&sleeptab, ident);
365 1.174 ad sleepq_wake(sq, ident, 1);
366 1.174 ad }
367 1.63 thorpej
368 1.117 gmcgarry
369 1.117 gmcgarry /*
370 1.117 gmcgarry * General yield call. Puts the current process back on its run queue and
371 1.117 gmcgarry * performs a voluntary context switch. Should only be called when the
372 1.198 ad * current process explicitly requests it (eg sched_yield(2)).
373 1.117 gmcgarry */
374 1.117 gmcgarry void
375 1.117 gmcgarry yield(void)
376 1.117 gmcgarry {
377 1.122 thorpej struct lwp *l = curlwp;
378 1.117 gmcgarry
379 1.174 ad KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
380 1.174 ad lwp_lock(l);
381 1.217 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
382 1.188 yamt KASSERT(l->l_stat == LSONPROC);
383 1.204 ad l->l_kpriority = false;
384 1.188 yamt (void)mi_switch(l);
385 1.174 ad KERNEL_LOCK(l->l_biglocks, l);
386 1.69 thorpej }
387 1.69 thorpej
388 1.69 thorpej /*
389 1.69 thorpej * General preemption call. Puts the current process back on its run queue
390 1.156 rpaulo * and performs an involuntary context switch.
391 1.69 thorpej */
392 1.69 thorpej void
393 1.174 ad preempt(void)
394 1.69 thorpej {
395 1.122 thorpej struct lwp *l = curlwp;
396 1.69 thorpej
397 1.174 ad KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
398 1.174 ad lwp_lock(l);
399 1.217 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
400 1.188 yamt KASSERT(l->l_stat == LSONPROC);
401 1.204 ad l->l_kpriority = false;
402 1.174 ad l->l_nivcsw++;
403 1.188 yamt (void)mi_switch(l);
404 1.174 ad KERNEL_LOCK(l->l_biglocks, l);
405 1.69 thorpej }
406 1.69 thorpej
407 1.69 thorpej /*
408 1.188 yamt * Compute the amount of time during which the current lwp was running.
409 1.130 nathanw *
410 1.188 yamt * - update l_rtime unless it's an idle lwp.
411 1.188 yamt */
412 1.188 yamt
413 1.199 ad void
414 1.212 yamt updatertime(lwp_t *l, const struct bintime *now)
415 1.188 yamt {
416 1.188 yamt
417 1.199 ad if ((l->l_flag & LW_IDLE) != 0)
418 1.188 yamt return;
419 1.188 yamt
420 1.212 yamt /* rtime += now - stime */
421 1.212 yamt bintime_add(&l->l_rtime, now);
422 1.212 yamt bintime_sub(&l->l_rtime, &l->l_stime);
423 1.188 yamt }
424 1.188 yamt
425 1.188 yamt /*
426 1.188 yamt * The machine independent parts of context switch.
427 1.188 yamt *
428 1.188 yamt * Returns 1 if another LWP was actually run.
429 1.26 cgd */
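/*
 * Outline: charge the outgoing LWP for its run time, put it back on a run
 * queue (possibly on another CPU if l_target_cpu is set), pick the next
 * LWP with sched_nextlwp() or fall back to the idle LWP, and hand off via
 * cpu_switchto().  A blocking soft interrupt LWP instead returns directly
 * to the interrupted thread, skipping the time accounting and pmap switch.
 */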
430 1.122 thorpej int
431 1.199 ad mi_switch(lwp_t *l)
432 1.26 cgd {
433 1.216 rmind struct cpu_info *ci, *tci = NULL;
434 1.76 thorpej struct schedstate_percpu *spc;
435 1.188 yamt struct lwp *newl;
436 1.174 ad int retval, oldspl;
437 1.212 yamt struct bintime bt;
438 1.199 ad bool returning;
439 1.26 cgd
440 1.188 yamt KASSERT(lwp_locked(l, NULL));
441 1.188 yamt LOCKDEBUG_BARRIER(l->l_mutex, 1);
442 1.174 ad
443 1.174 ad #ifdef KSTACK_CHECK_MAGIC
444 1.174 ad kstack_check_magic(l);
445 1.174 ad #endif
446 1.83 thorpej
447 1.212 yamt binuptime(&bt);
448 1.199 ad
449 1.209 ad KDASSERT(l->l_cpu == curcpu());
450 1.196 ad ci = l->l_cpu;
451 1.196 ad spc = &ci->ci_schedstate;
452 1.199 ad returning = false;
453 1.190 ad newl = NULL;
454 1.190 ad
455 1.199 ad /*
456 1.199 ad * If we have been asked to switch to a specific LWP, then there
457 1.199 ad * is no need to inspect the run queues. If a soft interrupt is
458 1.199 ad * blocking, then return to the interrupted thread without adjusting
459 1.199 ad * VM context or its start time: neither have been changed in order
460 1.199 ad * to take the interrupt.
461 1.199 ad */
462 1.190 ad if (l->l_switchto != NULL) {
463 1.204 ad if ((l->l_pflag & LP_INTR) != 0) {
464 1.199 ad returning = true;
465 1.199 ad softint_block(l);
466 1.199 ad if ((l->l_flag & LW_TIMEINTR) != 0)
467 1.212 yamt updatertime(l, &bt);
468 1.199 ad }
469 1.190 ad newl = l->l_switchto;
470 1.190 ad l->l_switchto = NULL;
471 1.190 ad }
472 1.204 ad #ifndef __HAVE_FAST_SOFTINTS
473 1.204 ad else if (ci->ci_data.cpu_softints != 0) {
474 1.204 ad /* There are pending soft interrupts, so pick one. */
475 1.204 ad newl = softint_picklwp();
476 1.204 ad newl->l_stat = LSONPROC;
477 1.204 ad newl->l_flag |= LW_RUNNING;
478 1.204 ad }
479 1.204 ad #endif /* !__HAVE_FAST_SOFTINTS */
480 1.190 ad
481 1.180 dsl /* Count time spent in current system call */
482 1.199 ad if (!returning) {
483 1.199 ad SYSCALL_TIME_SLEEP(l);
484 1.180 dsl
485 1.199 ad /*
486 1.199 ad * XXXSMP If we are using h/w performance counters,
487 1.199 ad * save context.
488 1.199 ad */
489 1.174 ad #if PERFCTRS
490 1.199 ad if (PMC_ENABLED(l->l_proc)) {
491 1.199 ad pmc_save_context(l->l_proc);
492 1.199 ad }
493 1.199 ad #endif
494 1.212 yamt updatertime(l, &bt);
495 1.174 ad }
496 1.113 gmcgarry
497 1.113 gmcgarry /*
498 1.174 ad * If on the CPU and we have gotten this far, then we must yield.
499 1.113 gmcgarry */
500 1.174 ad KASSERT(l->l_stat != LSRUN);
501 1.216 rmind if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
502 1.217 ad KASSERT(lwp_locked(l, spc->spc_lwplock));
503 1.216 rmind
504 1.220 rmind if (l->l_target_cpu == l->l_cpu) {
505 1.220 rmind l->l_target_cpu = NULL;
506 1.220 rmind } else {
507 1.220 rmind tci = l->l_target_cpu;
508 1.220 rmind }
509 1.220 rmind
510 1.216 rmind if (__predict_false(tci != NULL)) {
511 1.216 rmind /* Double-lock the runqueues */
512 1.216 rmind spc_dlock(ci, tci);
513 1.216 rmind } else {
514 1.216 rmind /* Lock the runqueue */
515 1.216 rmind spc_lock(ci);
516 1.216 rmind }
517 1.216 rmind
518 1.188 yamt if ((l->l_flag & LW_IDLE) == 0) {
519 1.188 yamt l->l_stat = LSRUN;
520 1.216 rmind if (__predict_false(tci != NULL)) {
521 1.216 rmind /*
522 1.216 rmind 				 * Set the new CPU and its lock, and clear
523 1.216 rmind 				 * l_target_cpu - the thread will be enqueued
524 1.216 rmind 				 * on the target CPU's runqueue.
525 1.216 rmind */
526 1.216 rmind l->l_cpu = tci;
527 1.216 rmind lwp_setlock(l, tci->ci_schedstate.spc_mutex);
528 1.216 rmind l->l_target_cpu = NULL;
529 1.216 rmind } else {
530 1.216 rmind lwp_setlock(l, spc->spc_mutex);
531 1.216 rmind }
532 1.188 yamt sched_enqueue(l, true);
533 1.216 rmind } else {
534 1.216 rmind KASSERT(tci == NULL);
535 1.188 yamt l->l_stat = LSIDL;
536 1.216 rmind }
537 1.216 rmind } else {
538 1.216 rmind /* Lock the runqueue */
539 1.216 rmind spc_lock(ci);
540 1.174 ad }
541 1.174 ad
542 1.174 ad /*
543 1.201 rmind 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
544 1.209 ad * If no LWP is runnable, select the idle LWP.
545 1.209 ad *
546 1.209 ad 	 * Note that spc_lwplock might not necessarily be held, and
547 1.209 ad 	 * the new thread will be unlocked after its LWP-lock is set.
548 1.174 ad */
549 1.190 ad if (newl == NULL) {
550 1.190 ad newl = sched_nextlwp();
551 1.190 ad if (newl != NULL) {
552 1.190 ad sched_dequeue(newl);
553 1.190 ad KASSERT(lwp_locked(newl, spc->spc_mutex));
554 1.190 ad newl->l_stat = LSONPROC;
555 1.196 ad newl->l_cpu = ci;
556 1.190 ad newl->l_flag |= LW_RUNNING;
557 1.217 ad lwp_setlock(newl, spc->spc_lwplock);
558 1.190 ad } else {
559 1.196 ad newl = ci->ci_data.cpu_idlelwp;
560 1.190 ad newl->l_stat = LSONPROC;
561 1.190 ad newl->l_flag |= LW_RUNNING;
562 1.190 ad }
563 1.204 ad /*
564 1.204 ad * Only clear want_resched if there are no
565 1.204 ad * pending (slow) software interrupts.
566 1.204 ad */
567 1.204 ad ci->ci_want_resched = ci->ci_data.cpu_softints;
568 1.199 ad spc->spc_flags &= ~SPCF_SWITCHCLEAR;
569 1.204 ad spc->spc_curpriority = lwp_eprio(newl);
570 1.199 ad }
571 1.199 ad
572 1.204 ad /* Items that must be updated with the CPU locked. */
573 1.199 ad if (!returning) {
574 1.204 ad /* Update the new LWP's start time. */
575 1.212 yamt newl->l_stime = bt;
576 1.204 ad
577 1.199 ad /*
578 1.204 ad * ci_curlwp changes when a fast soft interrupt occurs.
579 1.204 ad * We use cpu_onproc to keep track of which kernel or
580 1.204 ad * user thread is running 'underneath' the software
581 1.204 ad * interrupt. This is important for time accounting,
582 1.204 ad * itimers and forcing user threads to preempt (aston).
583 1.199 ad */
584 1.204 ad ci->ci_data.cpu_onproc = newl;
585 1.188 yamt }
586 1.188 yamt
587 1.188 yamt if (l != newl) {
588 1.188 yamt struct lwp *prevlwp;
589 1.174 ad
590 1.209 ad /* Release all locks, but leave the current LWP locked */
591 1.216 rmind if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
592 1.216 rmind /*
593 1.216 rmind 			 * In the case of migration, drop the local runqueue
594 1.216 rmind 			 * lock; the thread is now on another CPU's runqueue.
595 1.216 rmind */
596 1.216 rmind if (__predict_false(tci != NULL))
597 1.216 rmind spc_unlock(ci);
598 1.209 ad /*
599 1.209 ad * Drop spc_lwplock, if the current LWP has been moved
600 1.209 ad * to the run queue (it is now locked by spc_mutex).
601 1.209 ad */
602 1.217 ad mutex_spin_exit(spc->spc_lwplock);
603 1.188 yamt } else {
604 1.209 ad /*
605 1.209 ad * Otherwise, drop the spc_mutex, we are done with the
606 1.209 ad * run queues.
607 1.209 ad */
608 1.188 yamt mutex_spin_exit(spc->spc_mutex);
609 1.216 rmind KASSERT(tci == NULL);
610 1.188 yamt }
611 1.188 yamt
612 1.209 ad /*
613 1.209 ad 		 * Mark that a context switch is going to be performed
614 1.209 ad * for this LWP, to protect it from being switched
615 1.209 ad * to on another CPU.
616 1.209 ad */
617 1.209 ad KASSERT(l->l_ctxswtch == 0);
618 1.209 ad l->l_ctxswtch = 1;
619 1.209 ad l->l_ncsw++;
620 1.209 ad l->l_flag &= ~LW_RUNNING;
621 1.209 ad
622 1.209 ad /*
623 1.209 ad * Increase the count of spin-mutexes before the release
624 1.209 ad * of the last lock - we must remain at IPL_SCHED during
625 1.209 ad * the context switch.
626 1.209 ad */
627 1.209 ad oldspl = MUTEX_SPIN_OLDSPL(ci);
628 1.209 ad ci->ci_mtx_count--;
629 1.209 ad lwp_unlock(l);
630 1.209 ad
631 1.218 ad /* Count the context switch on this CPU. */
632 1.218 ad ci->ci_data.cpu_nswtch++;
633 1.188 yamt
634 1.209 ad /* Update status for lwpctl, if present. */
635 1.209 ad if (l->l_lwpctl != NULL)
636 1.209 ad l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
637 1.209 ad
638 1.199 ad /*
639 1.199 ad * Save old VM context, unless a soft interrupt
640 1.199 ad * handler is blocking.
641 1.199 ad */
642 1.199 ad if (!returning)
643 1.199 ad pmap_deactivate(l);
644 1.188 yamt
645 1.209 ad /*
646 1.209 ad 		 * We may need to spin-wait if 'newl' is still
647 1.209 ad * context switching on another CPU.
648 1.209 ad */
649 1.209 ad if (newl->l_ctxswtch != 0) {
650 1.209 ad u_int count;
651 1.209 ad count = SPINLOCK_BACKOFF_MIN;
652 1.209 ad while (newl->l_ctxswtch)
653 1.209 ad SPINLOCK_BACKOFF(count);
654 1.209 ad }
655 1.207 ad
656 1.188 yamt /* Switch to the new LWP.. */
657 1.204 ad prevlwp = cpu_switchto(l, newl, returning);
658 1.207 ad ci = curcpu();
659 1.207 ad
660 1.188 yamt /*
661 1.209 ad * Switched away - we have new curlwp.
662 1.209 ad * Restore VM context and IPL.
663 1.188 yamt */
664 1.209 ad pmap_activate(l);
665 1.188 yamt if (prevlwp != NULL) {
666 1.209 ad /* Normalize the count of the spin-mutexes */
667 1.209 ad ci->ci_mtx_count++;
668 1.209 ad /* Unmark the state of context switch */
669 1.209 ad membar_exit();
670 1.209 ad prevlwp->l_ctxswtch = 0;
671 1.188 yamt }
672 1.209 ad splx(oldspl);
673 1.209 ad
674 1.209 ad /* Update status for lwpctl, if present. */
675 1.219 ad if (l->l_lwpctl != NULL) {
676 1.209 ad l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
677 1.219 ad l->l_lwpctl->lc_pctr++;
678 1.219 ad }
679 1.174 ad
680 1.188 yamt retval = 1;
681 1.188 yamt } else {
682 1.188 yamt /* Nothing to do - just unlock and return. */
683 1.216 rmind KASSERT(tci == NULL);
684 1.216 rmind spc_unlock(ci);
685 1.188 yamt lwp_unlock(l);
686 1.122 thorpej retval = 0;
687 1.122 thorpej }
688 1.110 briggs
689 1.188 yamt KASSERT(l == curlwp);
690 1.188 yamt KASSERT(l->l_stat == LSONPROC);
691 1.207 ad KASSERT(l->l_cpu == ci);
692 1.188 yamt
693 1.110 briggs /*
694 1.174 ad * XXXSMP If we are using h/w performance counters, restore context.
695 1.26 cgd */
696 1.114 gmcgarry #if PERFCTRS
697 1.175 christos if (PMC_ENABLED(l->l_proc)) {
698 1.175 christos pmc_restore_context(l->l_proc);
699 1.166 christos }
700 1.114 gmcgarry #endif
701 1.180 dsl SYSCALL_TIME_WAKEUP(l);
702 1.188 yamt LOCKDEBUG_BARRIER(NULL, 1);
703 1.169 yamt
704 1.122 thorpej return retval;
705 1.26 cgd }
706 1.26 cgd
707 1.26 cgd /*
708 1.174 ad * Change process state to be runnable, placing it on the run queue if it is
709 1.174 ad * in memory, and awakening the swapper if it isn't in memory.
710 1.174 ad *
711 1.174 ad * Call with the process and LWP locked. Will return with the LWP unlocked.
712 1.26 cgd */
713 1.26 cgd void
714 1.122 thorpej setrunnable(struct lwp *l)
715 1.26 cgd {
716 1.122 thorpej struct proc *p = l->l_proc;
717 1.205 ad struct cpu_info *ci;
718 1.174 ad sigset_t *ss;
719 1.26 cgd
720 1.188 yamt KASSERT((l->l_flag & LW_IDLE) == 0);
721 1.183 ad KASSERT(mutex_owned(&p->p_smutex));
722 1.183 ad KASSERT(lwp_locked(l, NULL));
723 1.205 ad KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
724 1.83 thorpej
725 1.122 thorpej switch (l->l_stat) {
726 1.122 thorpej case LSSTOP:
727 1.33 mycroft /*
728 1.33 mycroft * If we're being traced (possibly because someone attached us
729 1.33 mycroft * while we were stopped), check for a signal from the debugger.
730 1.33 mycroft */
731 1.174 ad if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
732 1.174 ad if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
733 1.174 ad ss = &l->l_sigpend.sp_set;
734 1.174 ad else
735 1.174 ad ss = &p->p_sigpend.sp_set;
736 1.174 ad sigaddset(ss, p->p_xstat);
737 1.174 ad signotify(l);
738 1.53 mycroft }
739 1.174 ad p->p_nrlwps++;
740 1.26 cgd break;
741 1.174 ad case LSSUSPENDED:
742 1.178 pavel l->l_flag &= ~LW_WSUSPEND;
743 1.174 ad p->p_nrlwps++;
744 1.192 rmind cv_broadcast(&p->p_lwpcv);
745 1.122 thorpej break;
746 1.174 ad case LSSLEEP:
747 1.174 ad KASSERT(l->l_wchan != NULL);
748 1.26 cgd break;
749 1.174 ad default:
750 1.174 ad panic("setrunnable: lwp %p state was %d", l, l->l_stat);
751 1.26 cgd }
752 1.139 cl
753 1.174 ad /*
754 1.174 ad 	 * If the LWP was sleeping interruptibly, then it's OK to start it
755 1.174 ad * again. If not, mark it as still sleeping.
756 1.174 ad */
757 1.174 ad if (l->l_wchan != NULL) {
758 1.174 ad l->l_stat = LSSLEEP;
759 1.183 ad /* lwp_unsleep() will release the lock. */
760 1.221 ad lwp_unsleep(l, true);
761 1.174 ad return;
762 1.174 ad }
763 1.139 cl
764 1.174 ad /*
765 1.174 ad * If the LWP is still on the CPU, mark it as LSONPROC. It may be
766 1.174 ad * about to call mi_switch(), in which case it will yield.
767 1.174 ad */
768 1.188 yamt if ((l->l_flag & LW_RUNNING) != 0) {
769 1.174 ad l->l_stat = LSONPROC;
770 1.174 ad l->l_slptime = 0;
771 1.174 ad lwp_unlock(l);
772 1.174 ad return;
773 1.174 ad }
774 1.122 thorpej
775 1.174 ad /*
776 1.205 ad 	 * Look for a CPU to run on.
777 1.205 ad * Set the LWP runnable.
778 1.174 ad */
779 1.205 ad ci = sched_takecpu(l);
780 1.205 ad l->l_cpu = ci;
781 1.206 ad if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
782 1.206 ad lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
783 1.206 ad lwp_lock(l);
784 1.206 ad }
785 1.188 yamt sched_setrunnable(l);
786 1.174 ad l->l_stat = LSRUN;
787 1.122 thorpej l->l_slptime = 0;
788 1.174 ad
789 1.205 ad /*
790 1.205 ad 	 * If the thread is swapped out, wake the swapper to bring it back in.
791 1.205 ad * Otherwise, enter it into a run queue.
792 1.205 ad */
793 1.178 pavel if (l->l_flag & LW_INMEM) {
794 1.188 yamt sched_enqueue(l, false);
795 1.188 yamt resched_cpu(l);
796 1.174 ad lwp_unlock(l);
797 1.174 ad } else {
798 1.174 ad lwp_unlock(l);
799 1.177 ad uvm_kick_scheduler();
800 1.174 ad }
801 1.26 cgd }
802 1.26 cgd
803 1.26 cgd /*
804 1.174 ad * suspendsched:
805 1.174 ad *
806 1.174 ad * Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
807 1.174 ad */
808 1.94 bouyer void
809 1.174 ad suspendsched(void)
810 1.94 bouyer {
811 1.174 ad CPU_INFO_ITERATOR cii;
812 1.174 ad struct cpu_info *ci;
813 1.122 thorpej struct lwp *l;
814 1.174 ad struct proc *p;
815 1.94 bouyer
816 1.94 bouyer /*
817 1.174 ad * We do this by process in order not to violate the locking rules.
818 1.94 bouyer */
819 1.204 ad mutex_enter(&proclist_lock);
820 1.174 ad PROCLIST_FOREACH(p, &allproc) {
821 1.174 ad mutex_enter(&p->p_smutex);
822 1.174 ad
823 1.178 pavel if ((p->p_flag & PK_SYSTEM) != 0) {
824 1.174 ad mutex_exit(&p->p_smutex);
825 1.94 bouyer continue;
826 1.174 ad }
827 1.174 ad
828 1.174 ad p->p_stat = SSTOP;
829 1.174 ad
830 1.174 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
831 1.174 ad if (l == curlwp)
832 1.174 ad continue;
833 1.174 ad
834 1.174 ad lwp_lock(l);
835 1.122 thorpej
836 1.97 enami /*
837 1.174 ad 			 * Set LW_WREBOOT so that the LWP will suspend itself
838 1.174 ad 			 * when it tries to return to user mode. We want to
839 1.174 ad 			 * try to get as many LWPs as possible to
840 1.174 ad * the user / kernel boundary, so that they will
841 1.174 ad * release any locks that they hold.
842 1.97 enami */
843 1.178 pavel l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
844 1.174 ad
845 1.174 ad if (l->l_stat == LSSLEEP &&
846 1.178 pavel (l->l_flag & LW_SINTR) != 0) {
847 1.174 ad /* setrunnable() will release the lock. */
848 1.174 ad setrunnable(l);
849 1.174 ad continue;
850 1.174 ad }
851 1.174 ad
852 1.174 ad lwp_unlock(l);
853 1.94 bouyer }
854 1.174 ad
855 1.174 ad mutex_exit(&p->p_smutex);
856 1.94 bouyer }
857 1.204 ad mutex_exit(&proclist_lock);
858 1.174 ad
859 1.174 ad /*
860 1.174 ad * Kick all CPUs to make them preempt any LWPs running in user mode.
861 1.174 ad * They'll trap into the kernel and suspend themselves in userret().
862 1.174 ad */
863 1.204 ad for (CPU_INFO_FOREACH(cii, ci)) {
864 1.204 ad spc_lock(ci);
865 1.204 ad cpu_need_resched(ci, RESCHED_IMMED);
866 1.204 ad spc_unlock(ci);
867 1.204 ad }
868 1.174 ad }
869 1.174 ad
870 1.174 ad /*
871 1.174 ad * sched_unsleep:
872 1.174 ad *
873 1.174 ad  * This is called when the LWP has not been awoken normally but instead
874 1.174 ad * interrupted: for example, if the sleep timed out. Because of this,
875 1.174 ad * it's not a valid action for running or idle LWPs.
876 1.174 ad */
877 1.221 ad static u_int
878 1.221 ad sched_unsleep(struct lwp *l, bool cleanup)
879 1.174 ad {
880 1.174 ad
881 1.174 ad lwp_unlock(l);
882 1.174 ad panic("sched_unsleep");
883 1.174 ad }
884 1.174 ad
885 1.204 ad void
886 1.188 yamt resched_cpu(struct lwp *l)
887 1.188 yamt {
888 1.188 yamt struct cpu_info *ci;
889 1.188 yamt
890 1.188 yamt /*
891 1.188 yamt * XXXSMP
892 1.188 yamt * Since l->l_cpu persists across a context switch,
893 1.188 yamt * this gives us *very weak* processor affinity, in
894 1.188 yamt * that we notify the CPU on which the process last
895 1.188 yamt * ran that it should try to switch.
896 1.188 yamt *
897 1.188 yamt * This does not guarantee that the process will run on
898 1.188 yamt * that processor next, because another processor might
899 1.188 yamt * grab it the next time it performs a context switch.
900 1.188 yamt *
901 1.188 yamt * This also does not handle the case where its last
902 1.188 yamt * CPU is running a higher-priority process, but every
903 1.188 yamt * other CPU is running a lower-priority process. There
904 1.188 yamt * are ways to handle this situation, but they're not
905 1.188 yamt * currently very pretty, and we also need to weigh the
906 1.188 yamt * cost of moving a process from one CPU to another.
907 1.188 yamt */
908 1.204 ad ci = l->l_cpu;
909 1.204 ad if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
910 1.188 yamt cpu_need_resched(ci, 0);
911 1.188 yamt }
912 1.188 yamt
913 1.188 yamt static void
914 1.185 yamt sched_changepri(struct lwp *l, pri_t pri)
915 1.174 ad {
916 1.174 ad
917 1.188 yamt KASSERT(lwp_locked(l, NULL));
918 1.174 ad
919 1.204 ad if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
920 1.204 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
921 1.204 ad sched_dequeue(l);
922 1.204 ad l->l_priority = pri;
923 1.204 ad sched_enqueue(l, false);
924 1.204 ad } else {
925 1.174 ad l->l_priority = pri;
926 1.157 yamt }
927 1.188 yamt resched_cpu(l);
928 1.184 yamt }
929 1.184 yamt
930 1.188 yamt static void
931 1.185 yamt sched_lendpri(struct lwp *l, pri_t pri)
932 1.184 yamt {
933 1.184 yamt
934 1.188 yamt KASSERT(lwp_locked(l, NULL));
935 1.184 yamt
936 1.204 ad if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
937 1.204 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
938 1.204 ad sched_dequeue(l);
939 1.204 ad l->l_inheritedprio = pri;
940 1.204 ad sched_enqueue(l, false);
941 1.204 ad } else {
942 1.184 yamt l->l_inheritedprio = pri;
943 1.184 yamt }
944 1.188 yamt resched_cpu(l);
945 1.184 yamt }
946 1.184 yamt
947 1.184 yamt struct lwp *
948 1.184 yamt syncobj_noowner(wchan_t wchan)
949 1.184 yamt {
950 1.184 yamt
951 1.184 yamt return NULL;
952 1.151 yamt }
953 1.151 yamt
954 1.188 yamt /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
955 1.188 yamt fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
956 1.115 nisimura
957 1.130 nathanw /*
958 1.188 yamt * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
959 1.188 yamt * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
960 1.188 yamt * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
961 1.188 yamt *
962 1.188 yamt * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
963 1.188 yamt * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
964 1.188 yamt *
965 1.188 yamt  * If you don't want to bother with the faster/more-accurate formula, you
966 1.188 yamt * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
967 1.188 yamt * (more general) method of calculating the %age of CPU used by a process.
968 1.134 matt */
969 1.188 yamt #define	CCPU_SHIFT	11
970 1.134 matt
971 1.134 matt /*
972 1.188 yamt * sched_pstats:
973 1.188 yamt *
974 1.188 yamt * Update process statistics and check CPU resource allocation.
975 1.188 yamt * Call scheduler-specific hook to eventually adjust process/LWP
976 1.188 yamt * priorities.
977 1.130 nathanw */
978 1.188 yamt /* ARGSUSED */
979 1.113 gmcgarry void
980 1.188 yamt sched_pstats(void *arg)
981 1.113 gmcgarry {
982 1.188 yamt struct rlimit *rlim;
983 1.188 yamt struct lwp *l;
984 1.188 yamt struct proc *p;
985 1.204 ad int sig, clkhz;
986 1.188 yamt long runtm;
987 1.113 gmcgarry
988 1.188 yamt sched_pstats_ticks++;
989 1.174 ad
990 1.211 ad mutex_enter(&proclist_lock);
991 1.188 yamt PROCLIST_FOREACH(p, &allproc) {
992 1.188 yamt /*
993 1.188 yamt * Increment time in/out of memory and sleep time (if
994 1.188 yamt * sleeping). We ignore overflow; with 16-bit int's
995 1.188 yamt * (remember them?) overflow takes 45 days.
996 1.188 yamt */
997 1.188 yamt mutex_enter(&p->p_smutex);
998 1.188 yamt mutex_spin_enter(&p->p_stmutex);
999 1.212 yamt runtm = p->p_rtime.sec;
1000 1.188 yamt LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1001 1.188 yamt if ((l->l_flag & LW_IDLE) != 0)
1002 1.188 yamt continue;
1003 1.188 yamt lwp_lock(l);
1004 1.212 yamt runtm += l->l_rtime.sec;
1005 1.188 yamt l->l_swtime++;
1006 1.200 rmind sched_pstats_hook(l);
1007 1.188 yamt lwp_unlock(l);
1008 1.113 gmcgarry
1009 1.188 yamt /*
1010 1.188 yamt * p_pctcpu is only for ps.
1011 1.188 yamt */
1012 1.188 yamt l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
1013 1.188 yamt if (l->l_slptime < 1) {
1014 1.188 yamt clkhz = stathz != 0 ? stathz : hz;
1015 1.188 yamt #if (FSHIFT >= CCPU_SHIFT)
1016 1.188 yamt l->l_pctcpu += (clkhz == 100) ?
1017 1.188 yamt ((fixpt_t)l->l_cpticks) <<
1018 1.188 yamt (FSHIFT - CCPU_SHIFT) :
1019 1.188 yamt 				    100 * (((fixpt_t) l->l_cpticks)
1020 1.188 yamt << (FSHIFT - CCPU_SHIFT)) / clkhz;
1021 1.188 yamt #else
1022 1.188 yamt l->l_pctcpu += ((FSCALE - ccpu) *
1023 1.188 yamt (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
1024 1.146 matt #endif
1025 1.188 yamt l->l_cpticks = 0;
1026 1.188 yamt }
1027 1.188 yamt }
1028 1.188 yamt p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
1029 1.188 yamt mutex_spin_exit(&p->p_stmutex);
1030 1.174 ad
1031 1.188 yamt /*
1032 1.188 yamt * Check if the process exceeds its CPU resource allocation.
1033 1.188 yamt * If over max, kill it.
1034 1.188 yamt */
1035 1.188 yamt rlim = &p->p_rlimit[RLIMIT_CPU];
1036 1.188 yamt sig = 0;
1037 1.188 yamt if (runtm >= rlim->rlim_cur) {
1038 1.188 yamt if (runtm >= rlim->rlim_max)
1039 1.188 yamt sig = SIGKILL;
1040 1.188 yamt else {
1041 1.188 yamt sig = SIGXCPU;
1042 1.188 yamt if (rlim->rlim_cur < rlim->rlim_max)
1043 1.188 yamt rlim->rlim_cur += 5;
1044 1.188 yamt }
1045 1.188 yamt }
1046 1.188 yamt mutex_exit(&p->p_smutex);
1047 1.188 yamt if (sig) {
1048 1.213 ad mutex_enter(&proclist_mutex);
1049 1.188 yamt psignal(p, sig);
1050 1.213 ad mutex_exit(&proclist_mutex);
1051 1.188 yamt }
1052 1.174 ad }
1053 1.211 ad mutex_exit(&proclist_lock);
1054 1.188 yamt uvm_meter();
1055 1.191 ad cv_wakeup(&lbolt);
1056 1.188 yamt callout_schedule(&sched_pstats_ch, hz);
1057 1.113 gmcgarry }
1058 1.190 ad
1059 1.190 ad void
1060 1.190 ad sched_init(void)
1061 1.190 ad {
1062 1.190 ad
1063 1.208 ad cv_init(&lbolt, "lbolt");
1064 1.214 ad callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
1065 1.190 ad callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
1066 1.223 ad
1067 1.223 ad /* Balancing */
1068 1.223 ad worker_ci = curcpu();
1069 1.223 ad cacheht_time = mstohz(5); /* ~5 ms */
1070 1.223 ad balance_period = mstohz(300); /* ~300ms */
1071 1.223 ad
1072 1.223 ad /* Minimal count of LWPs for catching: log2(count of CPUs) */
1073 1.223 ad min_catch = min(ilog2(ncpu), 4);
1074 1.223 ad
1075 1.223 ad /* Initialize balancing callout and run it */
1076 1.223 ad #ifdef MULTIPROCESSOR
1077 1.223 ad callout_init(&balance_ch, CALLOUT_MPSAFE);
1078 1.223 ad callout_setfunc(&balance_ch, sched_balance, NULL);
1079 1.223 ad callout_schedule(&balance_ch, balance_period);
1080 1.223 ad #endif
1081 1.190 ad sched_pstats(NULL);
1082 1.190 ad }
1083 1.223 ad
1084 1.223 ad SYSCTL_SETUP(sysctl_sched_setup, "sysctl sched setup")
1085 1.223 ad {
1086 1.223 ad const struct sysctlnode *node = NULL;
1087 1.223 ad
1088 1.223 ad sysctl_createv(clog, 0, NULL, NULL,
1089 1.223 ad CTLFLAG_PERMANENT,
1090 1.223 ad CTLTYPE_NODE, "kern", NULL,
1091 1.223 ad NULL, 0, NULL, 0,
1092 1.223 ad CTL_KERN, CTL_EOL);
1093 1.223 ad sysctl_createv(clog, 0, NULL, &node,
1094 1.223 ad CTLFLAG_PERMANENT,
1095 1.223 ad CTLTYPE_NODE, "sched",
1096 1.223 ad SYSCTL_DESCR("Scheduler options"),
1097 1.223 ad NULL, 0, NULL, 0,
1098 1.223 ad CTL_KERN, CTL_CREATE, CTL_EOL);
1099 1.223 ad
1100 1.223 ad if (node == NULL)
1101 1.223 ad return;
1102 1.223 ad
1103 1.223 ad sysctl_createv(clog, 0, &node, NULL,
1104 1.223 ad CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1105 1.223 ad CTLTYPE_INT, "cacheht_time",
1106 1.223 ad SYSCTL_DESCR("Cache hotness time (in ticks)"),
1107 1.223 ad NULL, 0, &cacheht_time, 0,
1108 1.223 ad CTL_CREATE, CTL_EOL);
1109 1.223 ad sysctl_createv(clog, 0, &node, NULL,
1110 1.223 ad CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1111 1.223 ad CTLTYPE_INT, "balance_period",
1112 1.223 ad SYSCTL_DESCR("Balance period (in ticks)"),
1113 1.223 ad NULL, 0, &balance_period, 0,
1114 1.223 ad CTL_CREATE, CTL_EOL);
1115 1.223 ad sysctl_createv(clog, 0, &node, NULL,
1116 1.223 ad CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
1117 1.223 ad CTLTYPE_INT, "min_catch",
1118 1.223 ad SYSCTL_DESCR("Minimal count of threads for catching"),
1119 1.223 ad NULL, 0, &min_catch, 0,
1120 1.223 ad CTL_CREATE, CTL_EOL);
1121 1.223 ad sysctl_createv(clog, 0, &node, NULL,
1122 1.223 ad CTLFLAG_READWRITE,
1123 1.223 ad CTLTYPE_INT, "timesoftints",
1124 1.223 ad SYSCTL_DESCR("Track CPU time for soft interrupts"),
1125 1.223 ad NULL, 0, &softint_timing, 0,
1126 1.223 ad CTL_CREATE, CTL_EOL);
1127 1.223 ad }
1128 1.223 ad
1129 1.223 ad void
1130 1.223 ad sched_cpuattach(struct cpu_info *ci)
1131 1.223 ad {
1132 1.223 ad runqueue_t *ci_rq;
1133 1.223 ad void *rq_ptr;
1134 1.223 ad u_int i, size;
1135 1.223 ad
1136 1.223 ad if (ci->ci_schedstate.spc_lwplock == NULL) {
1137 1.223 ad ci->ci_schedstate.spc_lwplock =
1138 1.223 ad mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
1139 1.223 ad }
1140 1.223 ad if (ci == lwp0.l_cpu) {
1141 1.223 ad /* Initialize the scheduler structure of the primary LWP */
1142 1.223 ad lwp0.l_mutex = ci->ci_schedstate.spc_lwplock;
1143 1.223 ad }
1144 1.223 ad if (ci->ci_schedstate.spc_mutex != NULL) {
1145 1.223 ad /* Already initialized. */
1146 1.223 ad return;
1147 1.223 ad }
1148 1.223 ad
1149 1.223 ad /* Allocate the run queue */
1150 1.223 ad size = roundup2(sizeof(runqueue_t), coherency_unit) + coherency_unit;
1151 1.223 ad rq_ptr = kmem_zalloc(size, KM_SLEEP);
1152 1.223 ad if (rq_ptr == NULL) {
1153 1.223 ad panic("sched_cpuattach: could not allocate the runqueue");
1154 1.223 ad }
1155 1.223 ad ci_rq = (void *)(roundup2((uintptr_t)(rq_ptr), coherency_unit));
1156 1.223 ad
1157 1.223 ad /* Initialize run queues */
1158 1.223 ad ci->ci_schedstate.spc_mutex =
1159 1.223 ad mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
1160 1.223 ad for (i = 0; i < PRI_RT_COUNT; i++)
1161 1.223 ad TAILQ_INIT(&ci_rq->r_rt_queue[i].q_head);
1162 1.223 ad for (i = 0; i < PRI_TS_COUNT; i++)
1163 1.223 ad TAILQ_INIT(&ci_rq->r_ts_queue[i].q_head);
1164 1.223 ad
1165 1.223 ad ci->ci_schedstate.spc_sched_info = ci_rq;
1166 1.223 ad }
1167 1.223 ad
1168 1.223 ad /*
1169 1.223 ad * Control of the runqueue.
1170 1.223 ad */
1171 1.223 ad
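/*
 * sched_getrq: map a priority to its queue head.  Time-sharing priorities
 * (<= PRI_HIGHEST_TS) index r_ts_queue directly; higher priorities index
 * r_rt_queue relative to the end of the time-sharing range.
 */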
1172 1.223 ad static void *
1173 1.223 ad sched_getrq(runqueue_t *ci_rq, const pri_t prio)
1174 1.223 ad {
1175 1.223 ad
1176 1.223 ad KASSERT(prio < PRI_COUNT);
1177 1.223 ad return (prio <= PRI_HIGHEST_TS) ?
1178 1.223 ad &ci_rq->r_ts_queue[prio].q_head :
1179 1.223 ad &ci_rq->r_rt_queue[prio - PRI_HIGHEST_TS - 1].q_head;
1180 1.223 ad }
1181 1.223 ad
1182 1.223 ad void
1183 1.223 ad sched_enqueue(struct lwp *l, bool swtch)
1184 1.223 ad {
1185 1.223 ad runqueue_t *ci_rq;
1186 1.223 ad struct schedstate_percpu *spc;
1187 1.223 ad TAILQ_HEAD(, lwp) *q_head;
1188 1.223 ad const pri_t eprio = lwp_eprio(l);
1189 1.223 ad struct cpu_info *ci;
1190 1.223 ad
1191 1.223 ad ci = l->l_cpu;
1192 1.223 ad spc = &ci->ci_schedstate;
1193 1.223 ad ci_rq = spc->spc_sched_info;
1194 1.223 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
1195 1.223 ad
1196 1.223 ad /* Update the last run time on switch */
1197 1.223 ad if (__predict_true(swtch == true)) {
1198 1.223 ad 		l->l_rticksum += (hardclock_ticks - l->l_rticks);
1199 1.223 ad 		l->l_rticks = hardclock_ticks;
1200 1.223 ad } else if (l->l_rticks == 0)
1201 1.223 ad l->l_rticks = hardclock_ticks;
1202 1.223 ad
1203 1.223 ad /* Enqueue the thread */
1204 1.223 ad q_head = sched_getrq(ci_rq, eprio);
1205 1.223 ad if (TAILQ_EMPTY(q_head)) {
1206 1.223 ad u_int i;
1207 1.223 ad uint32_t q;
1208 1.223 ad
1209 1.223 ad /* Mark bit */
1210 1.223 ad i = eprio >> BITMAP_SHIFT;
1211 1.223 ad q = BITMAP_MSB >> (eprio & BITMAP_MASK);
1212 1.223 ad KASSERT((ci_rq->r_bitmap[i] & q) == 0);
1213 1.223 ad ci_rq->r_bitmap[i] |= q;
1214 1.223 ad }
1215 1.223 ad TAILQ_INSERT_TAIL(q_head, l, l_runq);
1216 1.223 ad ci_rq->r_count++;
1217 1.224 ad if ((l->l_pflag & LP_BOUND) == 0)
1218 1.223 ad ci_rq->r_mcount++;
1219 1.223 ad
1220 1.223 ad /*
1221 1.223 ad 	 * Update the value of the highest priority in the runqueue,
1222 1.223 ad 	 * if the priority of this thread is higher.
1223 1.223 ad */
1224 1.223 ad if (eprio > spc->spc_maxpriority)
1225 1.223 ad spc->spc_maxpriority = eprio;
1226 1.223 ad
1227 1.223 ad sched_newts(l);
1228 1.223 ad
1229 1.223 ad /*
1230 1.223 ad * Wake the chosen CPU or cause a preemption if the newly
1231 1.223 ad * enqueued thread has higher priority. Don't cause a
1232 1.223 ad * preemption if the thread is yielding (swtch).
1233 1.223 ad */
1234 1.223 ad if (!swtch && eprio > spc->spc_curpriority) {
1235 1.223 ad cpu_need_resched(ci,
1236 1.223 ad (eprio >= PRI_KERNEL ? RESCHED_IMMED : 0));
1237 1.223 ad }
1238 1.223 ad }
1239 1.223 ad
1240 1.223 ad void
1241 1.223 ad sched_dequeue(struct lwp *l)
1242 1.223 ad {
1243 1.223 ad runqueue_t *ci_rq;
1244 1.223 ad TAILQ_HEAD(, lwp) *q_head;
1245 1.223 ad struct schedstate_percpu *spc;
1246 1.223 ad const pri_t eprio = lwp_eprio(l);
1247 1.223 ad
1248 1.223 ad spc = & l->l_cpu->ci_schedstate;
1249 1.223 ad ci_rq = spc->spc_sched_info;
1250 1.223 ad KASSERT(lwp_locked(l, spc->spc_mutex));
1251 1.223 ad
1252 1.223 ad KASSERT(eprio <= spc->spc_maxpriority);
1253 1.223 ad KASSERT(ci_rq->r_bitmap[eprio >> BITMAP_SHIFT] != 0);
1254 1.223 ad KASSERT(ci_rq->r_count > 0);
1255 1.223 ad
1256 1.223 ad ci_rq->r_count--;
1257 1.224 ad if ((l->l_pflag & LP_BOUND) == 0)
1258 1.223 ad ci_rq->r_mcount--;
1259 1.223 ad
1260 1.223 ad q_head = sched_getrq(ci_rq, eprio);
1261 1.223 ad TAILQ_REMOVE(q_head, l, l_runq);
1262 1.223 ad if (TAILQ_EMPTY(q_head)) {
1263 1.223 ad u_int i;
1264 1.223 ad uint32_t q;
1265 1.223 ad
1266 1.223 ad /* Unmark bit */
1267 1.223 ad i = eprio >> BITMAP_SHIFT;
1268 1.223 ad q = BITMAP_MSB >> (eprio & BITMAP_MASK);
1269 1.223 ad KASSERT((ci_rq->r_bitmap[i] & q) != 0);
1270 1.223 ad ci_rq->r_bitmap[i] &= ~q;
1271 1.223 ad
1272 1.223 ad /*
1273 1.223 ad 		 * Update the value of the highest priority in the runqueue,
1274 1.223 ad 		 * in case this was the last thread in the queue of highest priority.
1275 1.223 ad */
1276 1.223 ad if (eprio != spc->spc_maxpriority)
1277 1.223 ad return;
1278 1.223 ad
1279 1.223 ad do {
1280 1.223 ad if (ci_rq->r_bitmap[i] != 0) {
1281 1.223 ad q = ffs(ci_rq->r_bitmap[i]);
1282 1.223 ad spc->spc_maxpriority =
1283 1.223 ad (i << BITMAP_SHIFT) + (BITMAP_BITS - q);
1284 1.223 ad return;
1285 1.223 ad }
1286 1.223 ad } while (i--);
1287 1.223 ad
1288 1.223 ad /* If not found - set the lowest value */
1289 1.223 ad spc->spc_maxpriority = 0;
1290 1.223 ad }
1291 1.223 ad }
1292 1.223 ad
1293 1.223 ad /*
1294 1.223 ad * Migration and balancing.
1295 1.223 ad */
1296 1.223 ad
1297 1.223 ad #ifdef MULTIPROCESSOR
1298 1.223 ad
1299 1.223 ad /* Estimate if LWP is cache-hot */
1300 1.223 ad static inline bool
1301 1.223 ad lwp_cache_hot(const struct lwp *l)
1302 1.223 ad {
1303 1.223 ad
1304 1.223 ad if (l->l_slptime || l->l_rticks == 0)
1305 1.223 ad return false;
1306 1.223 ad
1307 1.223 ad return (hardclock_ticks - l->l_rticks <= cacheht_time);
1308 1.223 ad }
1309 1.223 ad
1310 1.223 ad /* Check if LWP can migrate to the chosen CPU */
1311 1.223 ad static inline bool
1312 1.223 ad sched_migratable(const struct lwp *l, struct cpu_info *ci)
1313 1.223 ad {
1314 1.223 ad const struct schedstate_percpu *spc = &ci->ci_schedstate;
1315 1.223 ad
1316 1.223 ad /* CPU is offline */
1317 1.223 ad if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
1318 1.223 ad return false;
1319 1.223 ad
1320 1.223 ad /* Affinity bind */
1321 1.223 ad if (__predict_false(l->l_flag & LW_AFFINITY))
1322 1.223 ad return CPU_ISSET(cpu_index(ci), &l->l_affinity);
1323 1.223 ad
1324 1.223 ad /* Processor-set */
1325 1.223 ad return (spc->spc_psid == l->l_psid);
1326 1.223 ad }
1327 1.223 ad
1328 1.223 ad /*
1329 1.223 ad  * Estimate whether the LWP should migrate to another CPU.
1330 1.223 ad  * Return the chosen CPU, which may be the current one.
1331 1.223 ad */
1332 1.223 ad struct cpu_info *
1333 1.223 ad sched_takecpu(struct lwp *l)
1334 1.223 ad {
1335 1.223 ad struct cpu_info *ci, *tci, *first, *next;
1336 1.223 ad struct schedstate_percpu *spc;
1337 1.223 ad runqueue_t *ci_rq, *ici_rq;
1338 1.223 ad pri_t eprio, lpri, pri;
1339 1.223 ad
1340 1.223 ad KASSERT(lwp_locked(l, NULL));
1341 1.223 ad
1342 1.223 ad ci = l->l_cpu;
1343 1.223 ad spc = &ci->ci_schedstate;
1344 1.223 ad ci_rq = spc->spc_sched_info;
1345 1.223 ad
1346 1.223 ad /* If thread is strictly bound, do not estimate other CPUs */
1347 1.224 ad if (l->l_pflag & LP_BOUND)
1348 1.223 ad return ci;
1349 1.223 ad
1350 1.223 ad /* CPU of this thread is idling - run there */
1351 1.223 ad if (ci_rq->r_count == 0)
1352 1.223 ad return ci;
1353 1.223 ad
1354 1.223 ad eprio = lwp_eprio(l);
1355 1.223 ad
1356 1.223 ad /* Stay if thread is cache-hot */
1357 1.223 ad if (__predict_true(l->l_stat != LSIDL) &&
1358 1.223 ad lwp_cache_hot(l) && eprio >= spc->spc_curpriority)
1359 1.223 ad return ci;
1360 1.223 ad
1361 1.223 ad /* Run on current CPU if priority of thread is higher */
1362 1.223 ad ci = curcpu();
1363 1.223 ad spc = &ci->ci_schedstate;
1364 1.223 ad if (eprio > spc->spc_curpriority && sched_migratable(l, ci))
1365 1.223 ad return ci;
1366 1.223 ad
1367 1.223 ad /*
1368 1.223 ad * Look for the CPU with the lowest priority thread. In case of
1369 1.223 ad 	 * equal priority, choose the CPU with the fewest threads.
1370 1.223 ad */
1371 1.223 ad first = l->l_cpu;
1372 1.223 ad ci = first;
1373 1.223 ad tci = first;
1374 1.223 ad lpri = PRI_COUNT;
1375 1.223 ad do {
1376 1.223 ad next = CIRCLEQ_LOOP_NEXT(&cpu_queue, ci, ci_data.cpu_qchain);
1377 1.223 ad spc = &ci->ci_schedstate;
1378 1.223 ad ici_rq = spc->spc_sched_info;
1379 1.223 ad pri = max(spc->spc_curpriority, spc->spc_maxpriority);
1380 1.223 ad if (pri > lpri)
1381 1.223 ad continue;
1382 1.223 ad
1383 1.223 ad if (pri == lpri && ci_rq->r_count < ici_rq->r_count)
1384 1.223 ad continue;
1385 1.223 ad
1386 1.223 ad if (!sched_migratable(l, ci))
1387 1.223 ad continue;
1388 1.223 ad
1389 1.223 ad lpri = pri;
1390 1.223 ad tci = ci;
1391 1.223 ad ci_rq = ici_rq;
1392 1.223 ad } while (ci = next, ci != first);
1393 1.223 ad
1394 1.223 ad return tci;
1395 1.223 ad }
1396 1.223 ad
1397 1.223 ad /*
1398 1.223 ad  * Try to catch an LWP from the runqueue of another CPU.
1399 1.223 ad */
1400 1.223 ad static struct lwp *
1401 1.223 ad sched_catchlwp(void)
1402 1.223 ad {
1403 1.223 ad struct cpu_info *curci = curcpu(), *ci = worker_ci;
1404 1.223 ad struct schedstate_percpu *spc;
1405 1.223 ad TAILQ_HEAD(, lwp) *q_head;
1406 1.223 ad runqueue_t *ci_rq;
1407 1.223 ad struct lwp *l;
1408 1.223 ad
1409 1.223 ad if (curci == ci)
1410 1.223 ad return NULL;
1411 1.223 ad
1412 1.223 ad /* Lockless check */
1413 1.223 ad spc = &ci->ci_schedstate;
1414 1.223 ad ci_rq = spc->spc_sched_info;
1415 1.223 ad if (ci_rq->r_mcount < min_catch)
1416 1.223 ad return NULL;
1417 1.223 ad
1418 1.223 ad /*
1419 1.223 ad * Double-lock the runqueues.
1420 1.223 ad */
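	/*
	 * Lock order follows CPU address: if the remote CPU sorts after the
	 * local one, it can be locked directly; otherwise try-lock it, and
	 * on failure drop the local lock, take both in address order and
	 * re-check the local run queue, which may have gained work while
	 * it was unlocked.
	 */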
1421 1.223 ad if (curci < ci) {
1422 1.223 ad spc_lock(ci);
1423 1.223 ad } else if (!mutex_tryenter(ci->ci_schedstate.spc_mutex)) {
1424 1.223 ad const runqueue_t *cur_rq = curci->ci_schedstate.spc_sched_info;
1425 1.223 ad
1426 1.223 ad spc_unlock(curci);
1427 1.223 ad spc_lock(ci);
1428 1.223 ad spc_lock(curci);
1429 1.223 ad
1430 1.223 ad if (cur_rq->r_count) {
1431 1.223 ad spc_unlock(ci);
1432 1.223 ad return NULL;
1433 1.223 ad }
1434 1.223 ad }
1435 1.223 ad
1436 1.223 ad if (ci_rq->r_mcount < min_catch) {
1437 1.223 ad spc_unlock(ci);
1438 1.223 ad return NULL;
1439 1.223 ad }
1440 1.223 ad
1441 1.223 ad /* Take the highest priority thread */
1442 1.223 ad q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
1443 1.223 ad l = TAILQ_FIRST(q_head);
1444 1.223 ad
1445 1.223 ad for (;;) {
1446 1.223 ad 		/* Check each thread on the queue in turn */
1447 1.223 ad if (l == NULL)
1448 1.223 ad break;
1449 1.223 ad KASSERT(l->l_stat == LSRUN);
1450 1.223 ad KASSERT(l->l_flag & LW_INMEM);
1451 1.223 ad
1452 1.223 ad 		/* Look for threads that are allowed to migrate */
1453 1.224 ad if ((l->l_pflag & LP_BOUND) || lwp_cache_hot(l) ||
1454 1.223 ad !sched_migratable(l, curci)) {
1455 1.223 ad l = TAILQ_NEXT(l, l_runq);
1456 1.223 ad continue;
1457 1.223 ad }
1458 1.223 ad
1459 1.223 ad /* Grab the thread, and move to the local run queue */
1460 1.223 ad sched_dequeue(l);
1461 1.223 ad l->l_cpu = curci;
1462 1.223 ad lwp_unlock_to(l, curci->ci_schedstate.spc_mutex);
1463 1.223 ad sched_enqueue(l, false);
1464 1.223 ad return l;
1465 1.223 ad }
1466 1.223 ad spc_unlock(ci);
1467 1.223 ad
1468 1.223 ad return l;
1469 1.223 ad }
1470 1.223 ad
1471 1.223 ad /*
1472 1.223 ad  * Periodic calculations for balancing.
1473 1.223 ad */
1474 1.223 ad static void
1475 1.223 ad sched_balance(void *nocallout)
1476 1.223 ad {
1477 1.223 ad struct cpu_info *ci, *hci;
1478 1.223 ad runqueue_t *ci_rq;
1479 1.223 ad CPU_INFO_ITERATOR cii;
1480 1.223 ad u_int highest;
1481 1.223 ad
1482 1.223 ad hci = curcpu();
1483 1.223 ad highest = 0;
1484 1.223 ad
1485 1.223 ad 	/* Make lockless counts */
1486 1.223 ad for (CPU_INFO_FOREACH(cii, ci)) {
1487 1.223 ad ci_rq = ci->ci_schedstate.spc_sched_info;
1488 1.223 ad
1489 1.223 ad /* Average count of the threads */
1490 1.223 ad ci_rq->r_avgcount = (ci_rq->r_avgcount + ci_rq->r_mcount) >> 1;
1491 1.223 ad
1492 1.223 ad /* Look for CPU with the highest average */
1493 1.223 ad if (ci_rq->r_avgcount > highest) {
1494 1.223 ad hci = ci;
1495 1.223 ad highest = ci_rq->r_avgcount;
1496 1.223 ad }
1497 1.223 ad }
1498 1.223 ad
1499 1.223 ad /* Update the worker */
1500 1.223 ad worker_ci = hci;
1501 1.223 ad
1502 1.223 ad if (nocallout == NULL)
1503 1.223 ad callout_schedule(&balance_ch, balance_period);
1504 1.223 ad }
1505 1.223 ad
1506 1.223 ad #else
1507 1.223 ad
1508 1.223 ad struct cpu_info *
1509 1.223 ad sched_takecpu(struct lwp *l)
1510 1.223 ad {
1511 1.223 ad
1512 1.223 ad return l->l_cpu;
1513 1.223 ad }
1514 1.223 ad
1515 1.223 ad #endif /* MULTIPROCESSOR */
1516 1.223 ad
1517 1.223 ad /*
1518 1.223 ad  * Scheduler mill: pick the next LWP to run on the current CPU.
1519 1.223 ad */
1520 1.223 ad struct lwp *
1521 1.223 ad sched_nextlwp(void)
1522 1.223 ad {
1523 1.223 ad struct cpu_info *ci = curcpu();
1524 1.223 ad struct schedstate_percpu *spc;
1525 1.223 ad TAILQ_HEAD(, lwp) *q_head;
1526 1.223 ad runqueue_t *ci_rq;
1527 1.223 ad struct lwp *l;
1528 1.223 ad
1529 1.223 ad spc = &ci->ci_schedstate;
1530 1.223 ad ci_rq = spc->spc_sched_info;
1531 1.223 ad
1532 1.223 ad #ifdef MULTIPROCESSOR
1533 1.223 ad 	/* If the runqueue is empty, try to catch a thread from another CPU */
1534 1.223 ad if (__predict_false(spc->spc_flags & SPCF_OFFLINE)) {
1535 1.223 ad if ((ci_rq->r_count - ci_rq->r_mcount) == 0)
1536 1.223 ad return NULL;
1537 1.223 ad } else if (ci_rq->r_count == 0) {
1538 1.223 ad /* Reset the counter, and call the balancer */
1539 1.223 ad ci_rq->r_avgcount = 0;
1540 1.223 ad sched_balance(ci);
1541 1.223 ad
1542 1.223 ad /* The re-locking will be done inside */
1543 1.223 ad return sched_catchlwp();
1544 1.223 ad }
1545 1.223 ad #else
1546 1.223 ad if (ci_rq->r_count == 0)
1547 1.223 ad return NULL;
1548 1.223 ad #endif
1549 1.223 ad
1550 1.223 ad /* Take the highest priority thread */
1551 1.223 ad KASSERT(ci_rq->r_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
1552 1.223 ad q_head = sched_getrq(ci_rq, spc->spc_maxpriority);
1553 1.223 ad l = TAILQ_FIRST(q_head);
1554 1.223 ad KASSERT(l != NULL);
1555 1.223 ad
1556 1.223 ad sched_oncpu(l);
1557 1.223 ad l->l_rticks = hardclock_ticks;
1558 1.223 ad
1559 1.223 ad return l;
1560 1.223 ad }
1561 1.223 ad
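/*
 * sched_curcpu_runnable_p: true if the current CPU has work pending -
 * either (slow) soft interrupts or LWPs on its run queue.  An offline CPU
 * only counts threads that cannot migrate away from it.
 */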
1562 1.223 ad bool
1563 1.223 ad sched_curcpu_runnable_p(void)
1564 1.223 ad {
1565 1.223 ad const struct cpu_info *ci = curcpu();
1566 1.223 ad const runqueue_t *ci_rq = ci->ci_schedstate.spc_sched_info;
1567 1.223 ad
1568 1.223 ad #ifndef __HAVE_FAST_SOFTINTS
1569 1.223 ad if (ci->ci_data.cpu_softints)
1570 1.223 ad return true;
1571 1.223 ad #endif
1572 1.223 ad
1573 1.223 ad if (ci->ci_schedstate.spc_flags & SPCF_OFFLINE)
1574 1.223 ad return (ci_rq->r_count - ci_rq->r_mcount);
1575 1.223 ad
1576 1.223 ad return ci_rq->r_count;
1577 1.223 ad }
1578 1.223 ad
1579 1.223 ad /*
1580 1.223 ad * Debugging.
1581 1.223 ad */
1582 1.223 ad
1583 1.223 ad #ifdef DDB
1584 1.223 ad
1585 1.223 ad void
1586 1.223 ad sched_print_runqueue(void (*pr)(const char *, ...))
1587 1.223 ad {
1588 1.223 ad runqueue_t *ci_rq;
1589 1.223 ad struct schedstate_percpu *spc;
1590 1.223 ad struct lwp *l;
1591 1.223 ad struct proc *p;
1592 1.223 ad int i;
1593 1.223 ad struct cpu_info *ci;
1594 1.223 ad CPU_INFO_ITERATOR cii;
1595 1.223 ad
1596 1.223 ad for (CPU_INFO_FOREACH(cii, ci)) {
1597 1.223 ad spc = &ci->ci_schedstate;
1598 1.223 ad ci_rq = spc->spc_sched_info;
1599 1.223 ad
1600 1.223 ad (*pr)("Run-queue (CPU = %u):\n", ci->ci_index);
1601 1.223 ad (*pr)(" pid.lid = %d.%d, threads count = %u, "
1602 1.223 ad "avgcount = %u, highest pri = %d\n",
1603 1.225 dogcow #ifdef MULTIPROCESSOR
1604 1.223 ad ci->ci_curlwp->l_proc->p_pid, ci->ci_curlwp->l_lid,
1605 1.225 dogcow #else
1606 1.225 dogcow curlwp->l_proc->p_pid, curlwp->l_lid,
1607 1.225 dogcow #endif
1608 1.223 ad ci_rq->r_count, ci_rq->r_avgcount, spc->spc_maxpriority);
1609 1.223 ad i = (PRI_COUNT >> BITMAP_SHIFT) - 1;
1610 1.223 ad do {
1611 1.223 ad uint32_t q;
1612 1.223 ad q = ci_rq->r_bitmap[i];
1613 1.223 ad (*pr)(" bitmap[%d] => [ %d (0x%x) ]\n", i, ffs(q), q);
1614 1.223 ad } while (i--);
1615 1.223 ad }
1616 1.223 ad
1617 1.226 yamt (*pr)(" %5s %4s %4s %10s %3s %18s %4s %s\n",
1618 1.223 ad "LID", "PRI", "EPRI", "FL", "ST", "LWP", "CPU", "LRTIME");
1619 1.223 ad
1620 1.223 ad PROCLIST_FOREACH(p, &allproc) {
1621 1.223 ad (*pr)(" /- %d (%s)\n", (int)p->p_pid, p->p_comm);
1622 1.223 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1623 1.223 ad ci = l->l_cpu;
1624 1.226 yamt (*pr)(" | %5d %4u %4u 0x%8.8x %3s %18p %4u %u\n",
1625 1.223 ad (int)l->l_lid, l->l_priority, lwp_eprio(l),
1626 1.223 ad l->l_flag, l->l_stat == LSRUN ? "RQ" :
1627 1.223 ad (l->l_stat == LSSLEEP ? "SQ" : "-"),
1628 1.223 ad l, ci->ci_index,
1629 1.223 ad (u_int)(hardclock_ticks - l->l_rticks));
1630 1.223 ad }
1631 1.223 ad }
1632 1.223 ad }
1633 1.223 ad
1634 1.223 ad #endif