/*	$NetBSD: kern_synch.c,v 1.295 2011/10/05 20:37:40 njoly Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.295 2011/10/05 20:37:40 njoly Exp $");

#include "opt_kstack.h"
#include "opt_perfctrs.h"
#include "opt_sa.h"
#include "opt_dtrace.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/simplelock.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <dev/lockstat.h>

#include <sys/dtrace_bsd.h>
int				dtrace_vtime_active = 0;
dtrace_vtime_switch_func_t	dtrace_vtime_switch_func;

static void	sched_unsleep(struct lwp *, bool);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);
static void	resched_cpu(struct lwp *);
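
/*
 * Sync object operations (a descriptive note, not from the original):
 * sleep_syncobj covers LWPs blocked on the global sleep queues, e.g. via
 * mtsleep() or kpause() below; sched_syncobj appears to serve as the
 * default for runnable LWPs sitting on the scheduler's run queues.
 */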
syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/* "Lightning bolt": once a second sleep address. */
kcondvar_t		lbolt			__cacheline_aligned;

u_int			sched_pstats_ticks	__cacheline_aligned;

/* Preemption event counters. */
static struct evcnt	kpreempt_ev_crit	__cacheline_aligned;
static struct evcnt	kpreempt_ev_klock	__cacheline_aligned;
static struct evcnt	kpreempt_ev_immed	__cacheline_aligned;

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

void
synch_init(void)
{

	cv_init(&lbolt, "lbolt");

	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "defer: critical section");
	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "defer: kernel_lock");
	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "immediate");
}

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current LWP until a wakeup is
 * performed on the specified identifier.  The LWP will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
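
/*
 * Example (a sketch, not part of the original file): a typical mtsleep()
 * consumer waits for a condition under a kmutex.  The softc "sc" with
 * sc_lock, sc_flags and SC_READY below are hypothetical names used only
 * for illustration.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = mtsleep(&sc->sc_flags, PWAIT, "scwait", 0,
 *		    &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */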

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	kmutex_t *mp;
	sleepq_t *sq;
	int error;

	KASSERT(!(timo == 0 && intr == false));

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
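
/*
 * Example (sketch): an interruptible pause of roughly a tenth of a
 * second, with no lock to drop; any error from a delivered signal is
 * deliberately ignored here.
 *
 *	(void)kpause("pause", true, hz / 10, NULL);
 */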

#ifdef KERN_SA
/*
 * sa_awaken:
 *
 *	We believe this lwp is an SA lwp.  If it's yielding,
 *	let it know it needs to wake up.
 *
 *	We are called and exit with the lwp locked.  We are
 *	called in the middle of wakeup operations, so we must
 *	not touch the locks at all.
 */
void
sa_awaken(struct lwp *l)
{
	/* LOCK_ASSERT(lwp_locked(l, NULL)); */

	if (l == l->l_savp->savp_lwp && l->l_flag & LW_SA_YIELD)
		l->l_flag &= ~LW_SA_IDLE;
}
#endif /* KERN_SA */

/*
 * OBSOLETE INTERFACE
 *
 * Make all LWPs sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, (u_int)-1, mp);
}
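
/*
 * Example (sketch): the producer side pairing with the mtsleep() sketch
 * above; sc, sc_lock, sc_flags and SC_READY remain hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_flags |= SC_READY;
 *	wakeup(&sc->sc_flags);
 *	mutex_exit(&sc->sc_lock);
 */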

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority LWP first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, 1, mp);
}


/*
 * General yield call.  Puts the current LWP back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current LWP explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Handle a request made by another agent to preempt the current LWP
 * in-kernel.  Usually called when l_dopreempt may be non-zero.
 *
 * Character addresses for lockstat only.
 */
static char	in_critical_section;
static char	kernel_lock_held;
static char	is_softint;
static char	cpu_kpreempt_enter_fail;

bool
kpreempt(uintptr_t where)
{
	uintptr_t failed;
	lwp_t *l;
	int s, dop, lsflag;

	l = curlwp;
	failed = 0;
	while ((dop = l->l_dopreempt) != 0) {
		if (l->l_stat != LSONPROC) {
			/*
			 * About to block (or die), let it happen.
			 * Doesn't really count as "preemption has
			 * been blocked", since we're going to
			 * context switch.
			 */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
			/* Can't preempt idle loop, don't count as failure. */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false(l->l_nopreempt != 0)) {
			/* LWP holds preemption disabled, explicitly. */
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_crit.ev_count++;
			}
			failed = (uintptr_t)&in_critical_section;
			break;
		}
		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
			/* Can't preempt soft interrupts yet. */
			l->l_dopreempt = 0;
			failed = (uintptr_t)&is_softint;
			break;
		}
		s = splsched();
		if (__predict_false(l->l_blcnt != 0 ||
		    curcpu()->ci_biglock_wanted != NULL)) {
			/* Hold or want kernel_lock, code is not MT safe. */
			splx(s);
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_klock.ev_count++;
			}
			failed = (uintptr_t)&kernel_lock_held;
			break;
		}
		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
			/*
			 * It may be that the IPL is too high.
			 * cpu_kpreempt_enter() can schedule an
			 * interrupt to retry later.
			 */
			splx(s);
			failed = (uintptr_t)&cpu_kpreempt_enter_fail;
			break;
		}
		/* Do it! */
		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
			kpreempt_ev_immed.ev_count++;
		}
		lwp_lock(l);
		mi_switch(l);
		l->l_nopreempt++;
		splx(s);

		/* Take care of any MD cleanup. */
		cpu_kpreempt_exit(where);
		l->l_nopreempt--;
	}

	if (__predict_true(!failed)) {
		return false;
	}

	/* Record preemption failure for reporting via lockstat. */
	atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
	lsflag = 0;
	LOCKSTAT_ENTER(lsflag);
	if (__predict_false(lsflag)) {
		if (where == 0) {
			where = (uintptr_t)__builtin_return_address(0);
		}
		/* Preemption is on, might recurse, so make it atomic. */
		if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
		    (void *)where) == NULL) {
			LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
			l->l_pfaillock = failed;
		}
	}
	LOCKSTAT_EXIT(lsflag);
	return true;
}

/*
 * Return true if preemption is explicitly disabled.
 */
bool
kpreempt_disabled(void)
{
	const lwp_t *l = curlwp;

	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
}

/*
 * Disable kernel preemption.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

/*
 * Reenable kernel preemption.
 */
void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}
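
/*
 * Example (sketch): the usual pattern for a short section that must not
 * be preempted and migrated off the current CPU, e.g. while working on
 * curcpu()-private state:
 *
 *	kpreempt_disable();
 *	... touch per-CPU data ...
 *	kpreempt_enable();
 */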

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if (__predict_false(l->l_flag & LW_IDLE))
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}

/*
 * Select the next LWP to run on the current CPU.
 */
static inline lwp_t *
nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
{
	lwp_t *newl;

	/*
	 * Let sched_nextlwp() select the LWP to run the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * the new thread would be unlocked after setting the LWP-lock.
	 */
	newl = sched_nextlwp();
	if (newl != NULL) {
		sched_dequeue(newl);
		KASSERT(lwp_locked(newl, spc->spc_mutex));
		KASSERT(newl->l_cpu == ci);
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
		lwp_setlock(newl, spc->spc_lwplock);
	} else {
		newl = ci->ci_data.cpu_idlelwp;
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}

	/*
	 * Only clear want_resched if there are no pending (slow)
	 * software interrupts.
	 */
	ci->ci_want_resched = ci->ci_data.cpu_softints;
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
	spc->spc_curpriority = lwp_eprio(newl);

	return newl;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(kpreempt_disabled());
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	kstack_check_magic(l);

	binuptime(&bt);

	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	KASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_pflag & LP_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &bt);
	}

	/* Lock the runqueue */
	KASSERT(l->l_stat != LSRUN);
	mutex_spin_enter(spc->spc_mutex);

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
			/*
			 * Handle migration.  Note that "migrating LWP" may
			 * be reset here, if interrupt/preemption happens
			 * early in the idle LWP.
			 */
			if (l->l_target_cpu != NULL) {
				KASSERT((l->l_pflag & LP_INTR) == 0);
				spc->spc_migrating = l;
			}
		} else
			l->l_stat = LSIDL;
	}

	/* Pick new LWP to run. */
	if (newl == NULL) {
		newl = nextlwp(ci, spc);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);
	l->l_dopreempt = 0;
	if (__predict_false(l->l_pfailaddr != 0)) {
		LOCKSTAT_FLAG(lsflag);
		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
		    1, l->l_pfailtime, l->l_pfailaddr);
		LOCKSTAT_EXIT(lsflag);
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
		l->l_pfailaddr = 0;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that a context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		KASSERT((l->l_pflag & LP_RUNNING) != 0);
		l->l_pflag &= ~LP_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		KASSERTMSG(ci->ci_mtx_count == -1,
		    "%s: cpu%u: ci_mtx_count (%d) != -1",
		    __func__, cpu_index(ci), ci->ci_mtx_count);
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Count the context switch on this CPU. */
		ci->ci_data.cpu_nswtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (__predict_false(newl->l_ctxswtch != 0)) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (__predict_false(dtrace_vtime_active)) {
			(*dtrace_vtime_switch_func)(newl);
		}

		/* Switch to the new LWP. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		uvm_emap_switch(l);
		pcu_switchpoint(l);

		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL) {
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
			l->l_lwpctl->lc_pctr++;
		}

		/* Note trip through cpu_switchto(). */
		pserialize_switchpoint();

		KASSERT(l->l_cpu == ci);
		splx(oldspl);
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 * XXXSMP preemption problem.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * The machine independent parts of context switch to oblivion.
 * Does not return.  Call with the LWP unlocked.
 */
void
lwp_exit_switchaway(lwp_t *l)
{
	struct cpu_info *ci;
	struct lwp *newl;
	struct bintime bt;

	ci = l->l_cpu;

	KASSERT(kpreempt_disabled());
	KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
	KASSERT(ci == curcpu());
	LOCKDEBUG_BARRIER(NULL, 0);

	kstack_check_magic(l);

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);
	binuptime(&bt);
	updatertime(l, &bt);

	/* Must stay at IPL_SCHED even after releasing run queue lock. */
	(void)splsched();

	/*
	 * Let sched_nextlwp() select the LWP to run the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * the new thread would be unlocked after setting the LWP-lock.
	 */
	spc_lock(ci);
#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	} else
#endif	/* !__HAVE_FAST_SOFTINTS */
	{
		newl = nextlwp(ci, &ci->ci_schedstate);
	}

	/* Update the new LWP's start time. */
	newl->l_stime = bt;
	l->l_pflag &= ~LP_RUNNING;

	/*
	 * ci_curlwp changes when a fast soft interrupt occurs.
	 * We use cpu_onproc to keep track of which kernel or
	 * user thread is running 'underneath' the software
	 * interrupt.  This is important for time accounting,
	 * itimers and forcing user threads to preempt (aston).
	 */
	ci->ci_data.cpu_onproc = newl;

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);

	/* Unlock the run queue. */
	spc_unlock(ci);

	/* Count the context switch on this CPU. */
	ci->ci_data.cpu_nswtch++;

	/* Update status for lwpctl, if present. */
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;

	/*
	 * We may need to spin-wait if 'newl' is still
	 * context switching on another CPU.
	 */
	if (__predict_false(newl->l_ctxswtch != 0)) {
		u_int count;
		count = SPINLOCK_BACKOFF_MIN;
		while (newl->l_ctxswtch)
			SPINLOCK_BACKOFF(count);
	}

	/*
	 * If DTrace has set the active vtime enum to anything
	 * other than INACTIVE (0), then it should have set the
	 * function to call.
	 */
	if (__predict_false(dtrace_vtime_active)) {
		(*dtrace_vtime_switch_func)(newl);
	}

	/* Switch to the new LWP. */
	(void)cpu_switchto(NULL, newl, false);

	for (;;) continue;	/* XXX: convince gcc about "noreturn" */
	/* NOTREACHED */
}

/*
 * setrunnable: change LWP state to be runnable, placing it on the run queue.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(p->p_lock));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0)
			signotify(l);
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

#ifdef KERN_SA
	if (l->l_proc->p_sa)
		sa_awaken(l);
#endif /* KERN_SA */

	/*
	 * If the LWP was sleeping, start it again.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	spc_lock(ci);
	lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	sched_enqueue(l, false);
	resched_cpu(l);
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(p->p_lock);
		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(p->p_lock);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks that
			 * they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(p->p_lock);
	}
	mutex_exit(proc_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l, bool cleanup)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

static void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	KASSERT(lwp_locked(l, NULL));
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;

/*
 * Constants for averages over 1, 5 and 15 minutes when sampling at
 * 5 second intervals.
 */
static const fixpt_t cexp[] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
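
/*
 * A sketch of the arithmetic behind these constants (explanatory note,
 * not from the original): each decay filter below computes, once per
 * sampling interval,
 *
 *	avg = (c * avg + (FSCALE - c) * n) >> FSHIFT
 *
 * i.e. avg' = c*avg + (1-c)*n in fixed point, with c = exp(-interval/T).
 * For ccpu, sched_pstats() samples once per second, and after 60 samples
 * exp(-1/20)^60 = exp(-3) ~= 0.05, so about 95% of p_pctcpu has decayed.
 * For cexp[0], twelve 5-second samples span one minute, hence exp(-1/12).
 */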

/*
 * sched_pstats:
 *
 * => Update process statistics and check CPU resource allocation.
 * => Call scheduler-specific hook to eventually adjust LWP priorities.
 * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
 */
void
sched_pstats(void)
{
	extern struct loadavg averunnable;
	struct loadavg *avg = &averunnable;
	const int clkhz = (stathz != 0 ? stathz : hz);
	static bool backwards = false;
	static u_int lavg_count = 0;
	struct proc *p;
	int nrun;

	sched_pstats_ticks++;
	if (++lavg_count >= 5) {
		lavg_count = 0;
		nrun = 0;
	}
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		struct lwp *l;
		struct rlimit *rlim;
		long runtm;
		int sig;

		/* Increment sleep time (if sleeping), ignore overflow. */
		mutex_enter(p->p_lock);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			fixpt_t lpctcpu;
			u_int lcpticks;

			if (__predict_false((l->l_flag & LW_IDLE) != 0))
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_lwp_stats(l);

			/* For load average calculation. */
			if (__predict_false(lavg_count == 0) &&
			    (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
				switch (l->l_stat) {
				case LSSLEEP:
					if (l->l_slptime > 1) {
						break;
					}
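					/* FALLTHROUGH */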
				case LSRUN:
				case LSONPROC:
				case LSIDL:
					nrun++;
				}
			}
			lwp_unlock(l);

			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime != 0)
				continue;

			lpctcpu = l->l_pctcpu;
			lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
			lpctcpu += ((FSCALE - ccpu) *
			    (lcpticks * FSCALE / clkhz)) >> FSHIFT;
			l->l_pctcpu = lpctcpu;
		}
		/* Calculating p_pctcpu only for ps(1) */
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over the hard limit, kill it with SIGKILL.
		 * If over the soft limit, send SIGXCPU and raise
		 * the soft limit a little.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (__predict_false(runtm >= rlim->rlim_cur)) {
			if (runtm >= rlim->rlim_max) {
				sig = SIGKILL;
				log(LOG_NOTICE, "pid %d is killed: %s\n",
				    p->p_pid, "exceeded RLIMIT_CPU");
				uprintf("pid %d, command %s, is killed: %s\n",
				    p->p_pid, p->p_comm,
				    "exceeded RLIMIT_CPU");
			} else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(p->p_lock);
		if (__predict_false(runtm < 0)) {
			if (!backwards) {
				backwards = true;
				printf("WARNING: negative runtime; "
				    "monotonic clock has gone backwards\n");
			}
		} else if (__predict_false(sig)) {
			KASSERT((p->p_flag & PK_SYSTEM) == 0);
			psignal(p, sig);
		}
	}
	mutex_exit(proc_lock);

	/* Load average calculation. */
	if (__predict_false(lavg_count == 0)) {
		int i;
		CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
		for (i = 0; i < __arraycount(cexp); i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/* Lightning bolt. */
	cv_broadcast(&lbolt);
}