1 1.342 ad /* $NetBSD: kern_synch.c,v 1.342 2020/02/23 16:27:09 ad Exp $ */
2 1.63 thorpej
3 1.63 thorpej /*-
4 1.340 ad * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020
5 1.260 ad * The NetBSD Foundation, Inc.
6 1.63 thorpej * All rights reserved.
7 1.63 thorpej *
8 1.63 thorpej * This code is derived from software contributed to The NetBSD Foundation
9 1.63 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 1.188 yamt * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
11 1.188 yamt * Daniel Sieger.
12 1.63 thorpej *
13 1.63 thorpej * Redistribution and use in source and binary forms, with or without
14 1.63 thorpej * modification, are permitted provided that the following conditions
15 1.63 thorpej * are met:
16 1.63 thorpej * 1. Redistributions of source code must retain the above copyright
17 1.63 thorpej * notice, this list of conditions and the following disclaimer.
18 1.63 thorpej * 2. Redistributions in binary form must reproduce the above copyright
19 1.63 thorpej * notice, this list of conditions and the following disclaimer in the
20 1.63 thorpej * documentation and/or other materials provided with the distribution.
21 1.63 thorpej *
22 1.63 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 1.63 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 1.63 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 1.63 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 1.63 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 1.63 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 1.63 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 1.63 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 1.63 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 1.63 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 1.63 thorpej * POSSIBILITY OF SUCH DAMAGE.
33 1.63 thorpej */
34 1.26 cgd
35 1.26 cgd /*-
36 1.26 cgd * Copyright (c) 1982, 1986, 1990, 1991, 1993
37 1.26 cgd * The Regents of the University of California. All rights reserved.
38 1.26 cgd * (c) UNIX System Laboratories, Inc.
39 1.26 cgd * All or some portions of this file are derived from material licensed
40 1.26 cgd * to the University of California by American Telephone and Telegraph
41 1.26 cgd * Co. or Unix System Laboratories, Inc. and are reproduced herein with
42 1.26 cgd * the permission of UNIX System Laboratories, Inc.
43 1.26 cgd *
44 1.26 cgd * Redistribution and use in source and binary forms, with or without
45 1.26 cgd * modification, are permitted provided that the following conditions
46 1.26 cgd * are met:
47 1.26 cgd * 1. Redistributions of source code must retain the above copyright
48 1.26 cgd * notice, this list of conditions and the following disclaimer.
49 1.26 cgd * 2. Redistributions in binary form must reproduce the above copyright
50 1.26 cgd * notice, this list of conditions and the following disclaimer in the
51 1.26 cgd * documentation and/or other materials provided with the distribution.
52 1.136 agc * 3. Neither the name of the University nor the names of its contributors
53 1.26 cgd * may be used to endorse or promote products derived from this software
54 1.26 cgd * without specific prior written permission.
55 1.26 cgd *
56 1.26 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 1.26 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 1.26 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 1.26 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 1.26 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 1.26 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 1.26 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 1.26 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 1.26 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 1.26 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 1.26 cgd * SUCH DAMAGE.
67 1.26 cgd *
68 1.50 fvdl * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
69 1.26 cgd */
70 1.106 lukem
71 1.106 lukem #include <sys/cdefs.h>
72 1.342 ad __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.342 2020/02/23 16:27:09 ad Exp $");
73 1.48 mrg
74 1.109 yamt #include "opt_kstack.h"
75 1.277 darran #include "opt_dtrace.h"
76 1.26 cgd
77 1.174 ad #define __MUTEX_PRIVATE
78 1.174 ad
79 1.26 cgd #include <sys/param.h>
80 1.26 cgd #include <sys/systm.h>
81 1.26 cgd #include <sys/proc.h>
82 1.26 cgd #include <sys/kernel.h>
83 1.188 yamt #include <sys/cpu.h>
84 1.290 christos #include <sys/pserialize.h>
85 1.26 cgd #include <sys/resourcevar.h>
86 1.341 ad #include <sys/rwlock.h>
87 1.55 ross #include <sys/sched.h>
88 1.179 dsl #include <sys/syscall_stats.h>
89 1.174 ad #include <sys/sleepq.h>
90 1.174 ad #include <sys/lockdebug.h>
91 1.190 ad #include <sys/evcnt.h>
92 1.199 ad #include <sys/intr.h>
93 1.207 ad #include <sys/lwpctl.h>
94 1.209 ad #include <sys/atomic.h>
95 1.295 njoly #include <sys/syslog.h>
96 1.47 mrg
97 1.47 mrg #include <uvm/uvm_extern.h>
98 1.47 mrg
99 1.231 ad #include <dev/lockstat.h>
100 1.231 ad
101 1.276 darran #include <sys/dtrace_bsd.h>
102 1.279 darran int dtrace_vtime_active = 0;
103 1.276 darran dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
104 1.276 darran
105 1.271 rmind static void sched_unsleep(struct lwp *, bool);
106 1.188 yamt static void sched_changepri(struct lwp *, pri_t);
107 1.188 yamt static void sched_lendpri(struct lwp *, pri_t);
108 1.122 thorpej
109 1.174 ad syncobj_t sleep_syncobj = {
110 1.313 ozaki .sobj_flag = SOBJ_SLEEPQ_SORTED,
111 1.313 ozaki .sobj_unsleep = sleepq_unsleep,
112 1.313 ozaki .sobj_changepri = sleepq_changepri,
113 1.313 ozaki .sobj_lendpri = sleepq_lendpri,
114 1.313 ozaki .sobj_owner = syncobj_noowner,
115 1.174 ad };
116 1.174 ad
117 1.174 ad syncobj_t sched_syncobj = {
118 1.313 ozaki .sobj_flag = SOBJ_SLEEPQ_SORTED,
119 1.313 ozaki .sobj_unsleep = sched_unsleep,
120 1.313 ozaki .sobj_changepri = sched_changepri,
121 1.313 ozaki .sobj_lendpri = sched_lendpri,
122 1.313 ozaki .sobj_owner = syncobj_noowner,
123 1.174 ad };
124 1.122 thorpej
125 1.342 ad syncobj_t kpause_syncobj = {
126 1.342 ad .sobj_flag = SOBJ_SLEEPQ_NULL,
127 1.342 ad .sobj_unsleep = sleepq_unsleep,
128 1.342 ad .sobj_changepri = sleepq_changepri,
129 1.342 ad .sobj_lendpri = sleepq_lendpri,
130 1.342 ad .sobj_owner = syncobj_noowner,
131 1.342 ad };
132 1.342 ad
133 1.289 rmind /* "Lightning bolt": once a second sleep address. */
134 1.289 rmind kcondvar_t lbolt __cacheline_aligned;
135 1.223 ad
136 1.289 rmind u_int sched_pstats_ticks __cacheline_aligned;
137 1.289 rmind
138 1.289 rmind /* Preemption event counters. */
139 1.289 rmind static struct evcnt kpreempt_ev_crit __cacheline_aligned;
140 1.289 rmind static struct evcnt kpreempt_ev_klock __cacheline_aligned;
141 1.289 rmind static struct evcnt kpreempt_ev_immed __cacheline_aligned;
142 1.231 ad
143 1.237 rmind void
144 1.270 elad synch_init(void)
145 1.237 rmind {
146 1.237 rmind
147 1.237 rmind cv_init(&lbolt, "lbolt");
148 1.237 rmind
149 1.239 ad evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
150 1.237 rmind "kpreempt", "defer: critical section");
151 1.239 ad evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
152 1.237 rmind "kpreempt", "defer: kernel_lock");
153 1.239 ad evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
154 1.237 rmind "kpreempt", "immediate");
155 1.237 rmind }
156 1.237 rmind
157 1.26 cgd /*
158 1.174 ad * OBSOLETE INTERFACE
159 1.174 ad *
160 1.255 skrll * General sleep call. Suspends the current LWP until a wakeup is
161 1.255 skrll * performed on the specified identifier. The LWP will then be made
162 1.174 ad * runnable with the specified priority. Sleeps at most timo/hz seconds (0
163 1.174 ad * means no timeout).  If pri includes the PCATCH flag, signals are checked
164 1.26 cgd * before and after sleeping, else signals are not checked. Returns 0 if
165 1.26 cgd * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
166 1.26 cgd * signal needs to be delivered, ERESTART is returned if the current system
167 1.26 cgd * call should be restarted if possible, and EINTR is returned if the system
168 1.26 cgd * call should be interrupted by the signal.
169 1.26 cgd */
170 1.26 cgd int
171 1.297 rmind tsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo)
172 1.26 cgd {
173 1.122 thorpej struct lwp *l = curlwp;
174 1.174 ad sleepq_t *sq;
175 1.244 ad kmutex_t *mp;
176 1.26 cgd
177 1.204 ad KASSERT((l->l_pflag & LP_INTR) == 0);
178 1.272 pooka KASSERT(ident != &lbolt);
179 1.204 ad
180 1.174 ad if (sleepq_dontsleep(l)) {
181 1.174 ad (void)sleepq_abort(NULL, 0);
182 1.174 ad return 0;
183 1.26 cgd }
184 1.78 sommerfe
185 1.204 ad l->l_kpriority = true;
186 1.244 ad sq = sleeptab_lookup(&sleeptab, ident, &mp);
187 1.244 ad sleepq_enter(sq, l, mp);
188 1.204 ad sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
189 1.297 rmind return sleepq_block(timo, priority & PCATCH);
190 1.26 cgd }
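
/*
 * Illustrative sketch only (hypothetical driver code, not from this
 * file): the legacy interface pairs tsleep() on an arbitrary kernel
 * address with wakeup() on the same address.  New code should use
 * condition variables instead.
 *
 *	while (sc->sc_busy)
 *		(void)tsleep(&sc->sc_busy, PRIBIO, "busywait", 0);
 *	sc->sc_busy = true;
 *	...
 *	sc->sc_busy = false;
 *	wakeup(&sc->sc_busy);
 */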
191 1.26 cgd
192 1.187 ad int
193 1.187 ad mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
194 1.187 ad kmutex_t *mtx)
195 1.187 ad {
196 1.187 ad struct lwp *l = curlwp;
197 1.187 ad sleepq_t *sq;
198 1.244 ad kmutex_t *mp;
199 1.188 yamt int error;
200 1.187 ad
201 1.204 ad KASSERT((l->l_pflag & LP_INTR) == 0);
202 1.272 pooka KASSERT(ident != &lbolt);
203 1.204 ad
204 1.187 ad if (sleepq_dontsleep(l)) {
205 1.187 ad (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
206 1.187 ad return 0;
207 1.187 ad }
208 1.187 ad
209 1.204 ad l->l_kpriority = true;
210 1.244 ad sq = sleeptab_lookup(&sleeptab, ident, &mp);
211 1.244 ad sleepq_enter(sq, l, mp);
212 1.204 ad sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
213 1.187 ad mutex_exit(mtx);
214 1.188 yamt error = sleepq_block(timo, priority & PCATCH);
215 1.187 ad
216 1.187 ad if ((priority & PNORELOCK) == 0)
217 1.187 ad mutex_enter(mtx);
218 1.297 rmind
219 1.187 ad return error;
220 1.187 ad }
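
/*
 * Illustrative sketch only (hypothetical names): because mtsleep()
 * enqueues the LWP on the sleep queue before dropping the interlock,
 * a wakeup() issued by another thread between the mutex release and
 * the block cannot be lost.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_count == 0)
 *		(void)mtsleep(&sc->sc_count, PRIBIO, "countwait", 0,
 *		    &sc->sc_lock);
 *	sc->sc_count--;
 *	mutex_exit(&sc->sc_lock);
 */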
221 1.187 ad
222 1.26 cgd /*
223 1.341 ad * XXXAD Temporary - for use of UVM only. PLEASE DO NOT USE ELSEWHERE.
224 1.341 ad * Will go once there is a better solution, e.g. waits interlocked by
225 1.341 ad * pg->interlock. To wake an LWP sleeping with this, you need to hold a
226 1.341 ad * write lock.
227 1.341 ad */
228 1.341 ad int
229 1.341 ad rwtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
230 1.341 ad krwlock_t *rw)
231 1.341 ad {
232 1.341 ad struct lwp *l = curlwp;
233 1.341 ad sleepq_t *sq;
234 1.341 ad kmutex_t *mp;
235 1.341 ad int error;
236 1.341 ad krw_t op;
237 1.341 ad
238 1.341 ad KASSERT((l->l_pflag & LP_INTR) == 0);
239 1.341 ad KASSERT(ident != &lbolt);
240 1.341 ad
241 1.341 ad if (sleepq_dontsleep(l)) {
242 1.341 ad (void)sleepq_abort(NULL, (priority & PNORELOCK) != 0);
243 1.341 ad if ((priority & PNORELOCK) != 0)
244 1.341 ad rw_exit(rw);
245 1.341 ad return 0;
246 1.341 ad }
247 1.341 ad
248 1.341 ad l->l_kpriority = true;
249 1.341 ad sq = sleeptab_lookup(&sleeptab, ident, &mp);
250 1.341 ad sleepq_enter(sq, l, mp);
251 1.341 ad sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
252 1.341 ad op = rw_lock_op(rw);
253 1.341 ad rw_exit(rw);
254 1.341 ad error = sleepq_block(timo, priority & PCATCH);
255 1.341 ad
256 1.341 ad if ((priority & PNORELOCK) == 0)
257 1.341 ad rw_enter(rw, op);
258 1.341 ad
259 1.341 ad return error;
260 1.341 ad }
261 1.341 ad
262 1.341 ad /*
263 1.174 ad * General sleep call for situations where a wake-up is not expected.
264 1.26 cgd */
265 1.174 ad int
266 1.182 thorpej kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
267 1.26 cgd {
268 1.174 ad struct lwp *l = curlwp;
269 1.174 ad int error;
270 1.26 cgd
271 1.284 pooka KASSERT(!(timo == 0 && intr == false));
272 1.284 pooka
273 1.174 ad if (sleepq_dontsleep(l))
274 1.174 ad return sleepq_abort(NULL, 0);
275 1.26 cgd
276 1.174 ad if (mtx != NULL)
277 1.174 ad mutex_exit(mtx);
278 1.204 ad l->l_kpriority = true;
279 1.342 ad lwp_lock(l);
280 1.342 ad KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
281 1.342 ad sleepq_enqueue(NULL, l, wmesg, &kpause_syncobj);
282 1.188 yamt error = sleepq_block(timo, intr);
283 1.174 ad if (mtx != NULL)
284 1.174 ad mutex_enter(mtx);
285 1.83 thorpej
286 1.174 ad return error;
287 1.139 cl }
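
/*
 * Illustrative sketch only: kpause() is the preferred way to pause for
 * a fixed interval when no explicit wakeup is expected, for example
 * when polling hardware (device_ready() is a hypothetical helper):
 *
 *	while (!device_ready(sc))
 *		(void)kpause("devpoll", false, hz, NULL);
 */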
288 1.139 cl
289 1.26 cgd /*
290 1.174 ad * OBSOLETE INTERFACE
291 1.174 ad *
292 1.255 skrll * Make all LWPs sleeping on the specified identifier runnable.
293 1.26 cgd */
294 1.26 cgd void
295 1.174 ad wakeup(wchan_t ident)
296 1.26 cgd {
297 1.174 ad sleepq_t *sq;
298 1.244 ad kmutex_t *mp;
299 1.83 thorpej
300 1.261 rmind if (__predict_false(cold))
301 1.174 ad return;
302 1.83 thorpej
303 1.244 ad sq = sleeptab_lookup(&sleeptab, ident, &mp);
304 1.244 ad sleepq_wake(sq, ident, (u_int)-1, mp);
305 1.63 thorpej }
306 1.63 thorpej
307 1.63 thorpej /*
308 1.255 skrll * General yield call. Puts the current LWP back on its run queue and
309 1.117 gmcgarry * performs a voluntary context switch. Should only be called when the
310 1.255 skrll * current LWP explicitly requests it (e.g. sched_yield(2)).
311 1.117 gmcgarry */
312 1.117 gmcgarry void
313 1.117 gmcgarry yield(void)
314 1.117 gmcgarry {
315 1.122 thorpej struct lwp *l = curlwp;
316 1.117 gmcgarry
317 1.174 ad KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
318 1.174 ad lwp_lock(l);
319 1.329 ad
320 1.217 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
321 1.188 yamt KASSERT(l->l_stat == LSONPROC);
322 1.329 ad
323 1.325 ad /* Voluntary - ditch kpriority boost. */
324 1.204 ad l->l_kpriority = false;
325 1.329 ad spc_lock(l->l_cpu);
326 1.329 ad mi_switch(l);
327 1.174 ad KERNEL_LOCK(l->l_biglocks, l);
328 1.69 thorpej }
329 1.69 thorpej
330 1.69 thorpej /*
331 1.255 skrll * General preemption call. Puts the current LWP back on its run queue
332 1.156 rpaulo * and performs an involuntary context switch.
333 1.69 thorpej */
334 1.69 thorpej void
335 1.174 ad preempt(void)
336 1.69 thorpej {
337 1.122 thorpej struct lwp *l = curlwp;
338 1.69 thorpej
339 1.174 ad KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
340 1.174 ad lwp_lock(l);
341 1.329 ad
342 1.217 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
343 1.188 yamt KASSERT(l->l_stat == LSONPROC);
344 1.329 ad
345 1.325 ad /* Involuntary - keep kpriority boost. */
346 1.321 mlelstv l->l_pflag |= LP_PREEMPTING;
347 1.329 ad spc_lock(l->l_cpu);
348 1.329 ad mi_switch(l);
349 1.174 ad KERNEL_LOCK(l->l_biglocks, l);
350 1.69 thorpej }
351 1.69 thorpej
352 1.234 ad /*
353 1.234 ad * Handle a request made by another agent to preempt the current LWP
354 1.234 ad * in-kernel. Usually called when l_dopreempt may be non-zero.
355 1.234 ad *
356 1.234 ad * Character addresses for lockstat only.
357 1.234 ad */
358 1.326 ad static char kpreempt_is_disabled;
359 1.231 ad static char kernel_lock_held;
360 1.326 ad static char is_softint_lwp;
361 1.326 ad static char spl_is_raised;
362 1.231 ad
363 1.231 ad bool
364 1.231 ad kpreempt(uintptr_t where)
365 1.231 ad {
366 1.231 ad uintptr_t failed;
367 1.231 ad lwp_t *l;
368 1.264 ad int s, dop, lsflag;
369 1.231 ad
370 1.231 ad l = curlwp;
371 1.231 ad failed = 0;
372 1.231 ad while ((dop = l->l_dopreempt) != 0) {
373 1.231 ad if (l->l_stat != LSONPROC) {
374 1.231 ad /*
375 1.231 ad * About to block (or die), let it happen.
376 1.231 ad * Doesn't really count as "preemption has
377 1.231 ad * been blocked", since we're going to
378 1.231 ad * context switch.
379 1.231 ad */
380 1.325 ad atomic_swap_uint(&l->l_dopreempt, 0);
381 1.231 ad return true;
382 1.231 ad }
383 1.231 ad if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
384 1.231 ad /* Can't preempt idle loop, don't count as failure. */
385 1.325 ad atomic_swap_uint(&l->l_dopreempt, 0);
386 1.261 rmind return true;
387 1.231 ad }
388 1.231 ad if (__predict_false(l->l_nopreempt != 0)) {
389 1.231 ad /* LWP holds preemption disabled, explicitly. */
390 1.231 ad if ((dop & DOPREEMPT_COUNTED) == 0) {
391 1.234 ad kpreempt_ev_crit.ev_count++;
392 1.231 ad }
393 1.326 ad failed = (uintptr_t)&kpreempt_is_disabled;
394 1.231 ad break;
395 1.231 ad }
396 1.231 ad if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
397 1.261 rmind /* Can't preempt soft interrupts yet. */
398 1.325 ad atomic_swap_uint(&l->l_dopreempt, 0);
399 1.326 ad failed = (uintptr_t)&is_softint_lwp;
400 1.261 rmind break;
401 1.231 ad }
402 1.231 ad s = splsched();
403 1.338 ad if (__predict_false(l->l_blcnt != 0 ||
404 1.338 ad curcpu()->ci_biglock_wanted != NULL)) {
405 1.231 ad /* Hold or want kernel_lock, code is not MT safe. */
406 1.231 ad splx(s);
407 1.231 ad if ((dop & DOPREEMPT_COUNTED) == 0) {
408 1.234 ad kpreempt_ev_klock.ev_count++;
409 1.231 ad }
410 1.231 ad failed = (uintptr_t)&kernel_lock_held;
411 1.231 ad break;
412 1.231 ad }
413 1.231 ad if (__predict_false(!cpu_kpreempt_enter(where, s))) {
414 1.231 ad /*
415 1.231 ad * It may be that the IPL is too high.
416 1.231 ad * cpu_kpreempt_enter() can schedule an
417 1.231 ad * interrupt to retry later.
418 1.231 ad */
419 1.231 ad splx(s);
420 1.326 ad failed = (uintptr_t)&spl_is_raised;
421 1.231 ad break;
422 1.231 ad }
423 1.231 ad /* Do it! */
424 1.231 ad if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
425 1.234 ad kpreempt_ev_immed.ev_count++;
426 1.231 ad }
427 1.231 ad lwp_lock(l);
428 1.329 ad /* Involuntary - keep kpriority boost. */
429 1.326 ad l->l_pflag |= LP_PREEMPTING;
430 1.329 ad spc_lock(l->l_cpu);
431 1.231 ad mi_switch(l);
432 1.231 ad l->l_nopreempt++;
433 1.231 ad splx(s);
434 1.231 ad
435 1.231 ad /* Take care of any MD cleanup. */
436 1.231 ad cpu_kpreempt_exit(where);
437 1.231 ad l->l_nopreempt--;
438 1.231 ad }
439 1.231 ad
440 1.264 ad if (__predict_true(!failed)) {
441 1.264 ad return false;
442 1.264 ad }
443 1.264 ad
444 1.231 ad /* Record preemption failure for reporting via lockstat. */
445 1.264 ad atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
446 1.264 ad lsflag = 0;
447 1.264 ad LOCKSTAT_ENTER(lsflag);
448 1.264 ad if (__predict_false(lsflag)) {
449 1.264 ad if (where == 0) {
450 1.264 ad where = (uintptr_t)__builtin_return_address(0);
451 1.264 ad }
452 1.264 ad /* Preemption is on, might recurse, so make it atomic. */
453 1.264 ad if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
454 1.264 ad (void *)where) == NULL) {
455 1.264 ad LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
456 1.264 ad l->l_pfaillock = failed;
457 1.231 ad }
458 1.231 ad }
459 1.264 ad LOCKSTAT_EXIT(lsflag);
460 1.264 ad return true;
461 1.231 ad }
462 1.231 ad
463 1.69 thorpej /*
464 1.231 ad * Return true if preemption is explicitly disabled.
465 1.230 ad */
466 1.231 ad bool
467 1.231 ad kpreempt_disabled(void)
468 1.231 ad {
469 1.261 rmind const lwp_t *l = curlwp;
470 1.231 ad
471 1.231 ad return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
472 1.332 ad (l->l_flag & LW_IDLE) != 0 || (l->l_pflag & LP_INTR) != 0 ||
473 1.332 ad cpu_kpreempt_disabled();
474 1.231 ad }
475 1.230 ad
476 1.230 ad /*
477 1.231 ad * Disable kernel preemption.
478 1.230 ad */
479 1.230 ad void
480 1.231 ad kpreempt_disable(void)
481 1.230 ad {
482 1.230 ad
483 1.231 ad KPREEMPT_DISABLE(curlwp);
484 1.230 ad }
485 1.230 ad
486 1.230 ad /*
487 1.231 ad * Reenable kernel preemption.
488 1.230 ad */
489 1.231 ad void
490 1.231 ad kpreempt_enable(void)
491 1.230 ad {
492 1.230 ad
493 1.231 ad KPREEMPT_ENABLE(curlwp);
494 1.230 ad }
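
/*
 * Illustrative sketch only: a thread that must not migrate between
 * CPUs while it works on curcpu()-private data brackets the access
 * with the functions above.
 *
 *	kpreempt_disable();
 *	ci = curcpu();
 *	... use ci; the LWP cannot be preempted and migrated here ...
 *	kpreempt_enable();
 */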
495 1.230 ad
496 1.230 ad /*
497 1.188 yamt * Compute the amount of time during which the current lwp was running.
498 1.130 nathanw *
499 1.188 yamt * - update l_rtime unless it's an idle lwp.
500 1.188 yamt */
501 1.188 yamt
502 1.199 ad void
503 1.212 yamt updatertime(lwp_t *l, const struct bintime *now)
504 1.188 yamt {
505 1.188 yamt
506 1.261 rmind if (__predict_false(l->l_flag & LW_IDLE))
507 1.188 yamt return;
508 1.188 yamt
509 1.212 yamt /* rtime += now - stime */
510 1.212 yamt bintime_add(&l->l_rtime, now);
511 1.212 yamt bintime_sub(&l->l_rtime, &l->l_stime);
512 1.188 yamt }
513 1.188 yamt
514 1.188 yamt /*
515 1.245 ad * Select next LWP from the current CPU to run.
516 1.245 ad */
517 1.245 ad static inline lwp_t *
518 1.245 ad nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
519 1.245 ad {
520 1.245 ad lwp_t *newl;
521 1.245 ad
522 1.245 ad /*
523 1.245 ad * Let sched_nextlwp() select the LWP to run on the CPU next.
524 1.245 ad * If no LWP is runnable, select the idle LWP.
525 1.245 ad *
526 1.340 ad * On arrival here LWPs on a run queue are locked by spc_mutex which
527 1.340 ad * is currently held. Idle LWPs are always locked by spc_lwplock,
528 1.340 ad * which may or may not be held here. On exit from this code block,
529 1.340 ad * in all cases newl is locked by spc_lwplock.
530 1.245 ad */
531 1.245 ad newl = sched_nextlwp();
532 1.245 ad if (newl != NULL) {
533 1.245 ad sched_dequeue(newl);
534 1.245 ad KASSERT(lwp_locked(newl, spc->spc_mutex));
535 1.274 rmind KASSERT(newl->l_cpu == ci);
536 1.340 ad newl->l_stat = LSONPROC;
537 1.340 ad newl->l_pflag |= LP_RUNNING;
538 1.340 ad spc->spc_curpriority = lwp_eprio(newl);
539 1.340 ad spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
540 1.245 ad lwp_setlock(newl, spc->spc_lwplock);
541 1.245 ad } else {
542 1.340 ad /*
543 1.340 ad * Updates to newl here are unlocked, but newl is the idle
544 1.340 ad * LWP and thus sheltered from outside interference, so no
545 1.340 ad * harm is going to come of it.
546 1.340 ad */
547 1.245 ad newl = ci->ci_data.cpu_idlelwp;
548 1.340 ad newl->l_stat = LSONPROC;
549 1.340 ad newl->l_pflag |= LP_RUNNING;
550 1.340 ad spc->spc_curpriority = PRI_IDLE;
551 1.334 ad spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
552 1.334 ad SPCF_IDLE;
553 1.245 ad }
554 1.261 rmind
555 1.245 ad /*
556 1.325 ad * Only clear want_resched if there are no pending (slow) software
557 1.325 ad * interrupts. We can do this without an atomic, because no new
558 1.325 ad * LWPs can appear in the queue due to our hold on spc_mutex, and
559 1.325 ad * the update to ci_want_resched will become globally visible before
560 1.325 ad * the release of spc_mutex becomes globally visible.
561 1.245 ad */
562 1.245 ad ci->ci_want_resched = ci->ci_data.cpu_softints;
563 1.245 ad
564 1.245 ad return newl;
565 1.245 ad }
566 1.245 ad
567 1.245 ad /*
568 1.188 yamt * The machine independent parts of context switch.
569 1.188 yamt *
570 1.335 ad * NOTE: l->l_cpu is not changed in this routine, because an LWP never
571 1.335 ad * changes its own l_cpu (that would screw up curcpu on many ports and could
572 1.335 ad * cause all kinds of other evil stuff). l_cpu is always changed by some
573 1.339 ad * other actor, when it's known the LWP is not running (the LP_RUNNING flag
574 1.335 ad * is checked under lock).
575 1.26 cgd */
576 1.329 ad void
577 1.199 ad mi_switch(lwp_t *l)
578 1.26 cgd {
579 1.246 rmind struct cpu_info *ci;
580 1.76 thorpej struct schedstate_percpu *spc;
581 1.188 yamt struct lwp *newl;
582 1.339 ad kmutex_t *lock;
583 1.329 ad int oldspl;
584 1.212 yamt struct bintime bt;
585 1.199 ad bool returning;
586 1.26 cgd
587 1.188 yamt KASSERT(lwp_locked(l, NULL));
588 1.231 ad KASSERT(kpreempt_disabled());
589 1.329 ad KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
590 1.337 ad KASSERTMSG(l->l_blcnt == 0, "kernel_lock leaked");
591 1.174 ad
592 1.174 ad kstack_check_magic(l);
593 1.83 thorpej
594 1.212 yamt binuptime(&bt);
595 1.199 ad
596 1.304 matt KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
597 1.339 ad KASSERT((l->l_pflag & LP_RUNNING) != 0);
598 1.329 ad KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
599 1.329 ad ci = curcpu();
600 1.196 ad spc = &ci->ci_schedstate;
601 1.199 ad returning = false;
602 1.190 ad newl = NULL;
603 1.190 ad
604 1.199 ad /*
605 1.199 ad * If we have been asked to switch to a specific LWP, then there
606 1.199 ad * is no need to inspect the run queues. If a soft interrupt is
607 1.199 ad * blocking, then return to the interrupted thread without adjusting
608 1.199 ad * VM context or its start time: neither has been changed in order
609 1.199 ad * to take the interrupt.
610 1.199 ad */
611 1.190 ad if (l->l_switchto != NULL) {
612 1.204 ad if ((l->l_pflag & LP_INTR) != 0) {
613 1.199 ad returning = true;
614 1.199 ad softint_block(l);
615 1.248 ad if ((l->l_pflag & LP_TIMEINTR) != 0)
616 1.212 yamt updatertime(l, &bt);
617 1.199 ad }
618 1.190 ad newl = l->l_switchto;
619 1.190 ad l->l_switchto = NULL;
620 1.190 ad }
621 1.204 ad #ifndef __HAVE_FAST_SOFTINTS
622 1.204 ad else if (ci->ci_data.cpu_softints != 0) {
623 1.204 ad /* There are pending soft interrupts, so pick one. */
624 1.204 ad newl = softint_picklwp();
625 1.204 ad newl->l_stat = LSONPROC;
626 1.339 ad newl->l_pflag |= LP_RUNNING;
627 1.204 ad }
628 1.204 ad #endif /* !__HAVE_FAST_SOFTINTS */
629 1.190 ad
630 1.113 gmcgarry /*
631 1.174 ad * If on the CPU and we have gotten this far, then we must yield.
632 1.113 gmcgarry */
633 1.246 rmind if (l->l_stat == LSONPROC && l != newl) {
634 1.217 ad KASSERT(lwp_locked(l, spc->spc_lwplock));
635 1.329 ad KASSERT((l->l_flag & LW_IDLE) == 0);
636 1.329 ad l->l_stat = LSRUN;
637 1.329 ad lwp_setlock(l, spc->spc_mutex);
638 1.329 ad sched_enqueue(l);
639 1.336 ad sched_preempted(l);
640 1.336 ad
641 1.329 ad /*
642 1.329 ad * Handle migration. Note that "migrating LWP" may
643 1.329 ad * be reset here, if interrupt/preemption happens
644 1.329 ad * early in idle LWP.
645 1.329 ad */
646 1.329 ad if (l->l_target_cpu != NULL && (l->l_pflag & LP_BOUND) == 0) {
647 1.329 ad KASSERT((l->l_pflag & LP_INTR) == 0);
648 1.329 ad spc->spc_migrating = l;
649 1.329 ad }
650 1.174 ad }
651 1.174 ad
652 1.245 ad /* Pick new LWP to run. */
653 1.190 ad if (newl == NULL) {
654 1.245 ad newl = nextlwp(ci, spc);
655 1.199 ad }
656 1.199 ad
657 1.204 ad /* Items that must be updated with the CPU locked. */
658 1.199 ad if (!returning) {
659 1.326 ad /* Count time spent in current system call */
660 1.326 ad SYSCALL_TIME_SLEEP(l);
661 1.326 ad
662 1.326 ad updatertime(l, &bt);
663 1.326 ad
664 1.204 ad /* Update the new LWP's start time. */
665 1.212 yamt newl->l_stime = bt;
666 1.204 ad
667 1.199 ad /*
668 1.204 ad * ci_curlwp changes when a fast soft interrupt occurs.
669 1.327 ad * We use ci_onproc to keep track of which kernel or
670 1.204 ad * user thread is running 'underneath' the software
671 1.204 ad * interrupt. This is important for time accounting,
672 1.204 ad * itimers and forcing user threads to preempt (aston).
673 1.199 ad */
674 1.327 ad ci->ci_onproc = newl;
675 1.188 yamt }
676 1.188 yamt
677 1.241 ad /*
678 1.325 ad * Preemption related tasks. Must be done holding spc_mutex. Clear
679 1.325 ad * l_dopreempt without an atomic - it's only ever set non-zero by
680 1.325 ad * sched_resched_cpu() which also holds spc_mutex, and only ever
681 1.325 ad * cleared by the LWP itself (us) with atomics when not under lock.
682 1.241 ad */
683 1.231 ad l->l_dopreempt = 0;
684 1.231 ad if (__predict_false(l->l_pfailaddr != 0)) {
685 1.231 ad LOCKSTAT_FLAG(lsflag);
686 1.231 ad LOCKSTAT_ENTER(lsflag);
687 1.231 ad LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
688 1.231 ad LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
689 1.231 ad 1, l->l_pfailtime, l->l_pfailaddr);
690 1.231 ad LOCKSTAT_EXIT(lsflag);
691 1.231 ad l->l_pfailtime = 0;
692 1.231 ad l->l_pfaillock = 0;
693 1.231 ad l->l_pfailaddr = 0;
694 1.231 ad }
695 1.231 ad
696 1.188 yamt if (l != newl) {
697 1.188 yamt struct lwp *prevlwp;
698 1.174 ad
699 1.209 ad /* Release all locks, but leave the current LWP locked */
700 1.246 rmind if (l->l_mutex == spc->spc_mutex) {
701 1.209 ad /*
702 1.209 ad * Drop spc_lwplock, if the current LWP has been moved
703 1.209 ad * to the run queue (it is now locked by spc_mutex).
704 1.209 ad */
705 1.217 ad mutex_spin_exit(spc->spc_lwplock);
706 1.188 yamt } else {
707 1.209 ad /*
708 1.209 ad * Otherwise, drop the spc_mutex, we are done with the
709 1.209 ad * run queues.
710 1.209 ad */
711 1.188 yamt mutex_spin_exit(spc->spc_mutex);
712 1.188 yamt }
713 1.188 yamt
714 1.330 ad /* We're down to only one lock, so do debug checks. */
715 1.330 ad LOCKDEBUG_BARRIER(l->l_mutex, 1);
716 1.330 ad
717 1.335 ad /* Count the context switch. */
718 1.335 ad CPU_COUNT(CPU_COUNT_NSWTCH, 1);
719 1.209 ad l->l_ncsw++;
720 1.335 ad if ((l->l_pflag & LP_PREEMPTING) != 0) {
721 1.321 mlelstv l->l_nivcsw++;
722 1.335 ad l->l_pflag &= ~LP_PREEMPTING;
723 1.335 ad }
724 1.209 ad
725 1.209 ad /*
726 1.209 ad * Increase the count of spin-mutexes before the release
727 1.335 ad * of the last lock - we must remain at IPL_SCHED after
728 1.335 ad * releasing the lock.
729 1.209 ad */
730 1.287 matt KASSERTMSG(ci->ci_mtx_count == -1,
731 1.301 rmind "%s: cpu%u: ci_mtx_count (%d) != -1 "
732 1.301 rmind "(block with spin-mutex held)",
733 1.291 jym __func__, cpu_index(ci), ci->ci_mtx_count);
734 1.209 ad oldspl = MUTEX_SPIN_OLDSPL(ci);
735 1.335 ad ci->ci_mtx_count = -2;
736 1.188 yamt
737 1.209 ad /* Update status for lwpctl, if present. */
738 1.335 ad if (l->l_lwpctl != NULL) {
739 1.335 ad l->l_lwpctl->lc_curcpu = (l->l_stat == LSZOMB ?
740 1.335 ad LWPCTL_CPU_EXITED : LWPCTL_CPU_NONE);
741 1.335 ad }
742 1.209 ad
743 1.199 ad /*
744 1.335 ad * If curlwp is a soft interrupt LWP, there's nobody on the
745 1.335 ad * other side to unlock - we're returning into an assembly
746 1.335 ad * trampoline. Unlock now. This is safe because this is a
747 1.335 ad * kernel LWP and is bound to current CPU: the worst anyone
748 1.335 ad * else will do to it is to put it back onto this CPU's run
749 1.335 ad * queue (and the CPU is busy here right now!).
750 1.199 ad */
751 1.335 ad if (returning) {
752 1.335 ad /* Keep IPL_SCHED after this; MD code will fix up. */
753 1.339 ad l->l_pflag &= ~LP_RUNNING;
754 1.335 ad lwp_unlock(l);
755 1.335 ad } else {
756 1.335 ad /* A normal LWP: save old VM context. */
757 1.199 ad pmap_deactivate(l);
758 1.209 ad }
759 1.207 ad
760 1.276 darran /*
761 1.276 darran * If DTrace has set the active vtime enum to anything
762 1.276 darran * other than INACTIVE (0), then it should have set the
763 1.276 darran * function to call.
764 1.276 darran */
765 1.278 darran if (__predict_false(dtrace_vtime_active)) {
766 1.276 darran (*dtrace_vtime_switch_func)(newl);
767 1.276 darran }
768 1.276 darran
769 1.318 ozaki /*
770 1.318 ozaki * We must ensure not to come here from inside a read section.
771 1.318 ozaki */
772 1.318 ozaki KASSERT(pserialize_not_in_read_section());
773 1.318 ozaki
774 1.188 yamt /* Switch to the new LWP.. */
775 1.305 mlelstv #ifdef MULTIPROCESSOR
776 1.304 matt KASSERT(curlwp == ci->ci_curlwp);
777 1.305 mlelstv #endif
778 1.304 matt KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
779 1.204 ad prevlwp = cpu_switchto(l, newl, returning);
780 1.207 ad ci = curcpu();
781 1.305 mlelstv #ifdef MULTIPROCESSOR
782 1.304 matt KASSERT(curlwp == ci->ci_curlwp);
783 1.305 mlelstv #endif
784 1.304 matt KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p",
785 1.304 matt l, curlwp, prevlwp);
786 1.335 ad KASSERT(prevlwp != NULL);
787 1.335 ad KASSERT(l->l_cpu == ci);
788 1.335 ad KASSERT(ci->ci_mtx_count == -2);
789 1.335 ad
790 1.335 ad /*
791 1.339 ad * Immediately mark the previous LWP as no longer running
792 1.339 ad * and unlock (to keep lock wait times as short as possible).
793 1.339 ad * We'll still be at IPL_SCHED afterwards. If a zombie,
794 1.339 ad * don't touch after clearing LP_RUNNING as it could be
795 1.339 ad * reaped by another CPU. Issue a memory barrier to ensure
796 1.339 ad * this.
797 1.335 ad */
798 1.339 ad KASSERT((prevlwp->l_pflag & LP_RUNNING) != 0);
799 1.339 ad lock = prevlwp->l_mutex;
800 1.339 ad if (__predict_false(prevlwp->l_stat == LSZOMB)) {
801 1.339 ad membar_sync();
802 1.339 ad }
803 1.339 ad prevlwp->l_pflag &= ~LP_RUNNING;
804 1.339 ad mutex_spin_exit(lock);
805 1.207 ad
806 1.188 yamt /*
807 1.209 ad * Switched away - we have new curlwp.
808 1.209 ad * Restore VM context and IPL.
809 1.188 yamt */
810 1.209 ad pmap_activate(l);
811 1.288 rmind pcu_switchpoint(l);
812 1.265 rmind
813 1.209 ad /* Update status for lwpctl, if present. */
814 1.219 ad if (l->l_lwpctl != NULL) {
815 1.209 ad l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
816 1.219 ad l->l_lwpctl->lc_pctr++;
817 1.219 ad }
818 1.174 ad
819 1.300 yamt /*
820 1.335 ad * Normalize the spin mutex count and restore the previous
821 1.335 ad * SPL. Note that, unless the caller disabled preemption,
822 1.335 ad * we can be preempted at any time after this splx().
823 1.300 yamt */
824 1.331 ad KASSERT(l->l_cpu == ci);
825 1.335 ad KASSERT(ci->ci_mtx_count == -1);
826 1.335 ad ci->ci_mtx_count = 0;
827 1.329 ad splx(oldspl);
828 1.188 yamt } else {
829 1.188 yamt /* Nothing to do - just unlock and return. */
830 1.246 rmind mutex_spin_exit(spc->spc_mutex);
831 1.321 mlelstv l->l_pflag &= ~LP_PREEMPTING;
832 1.188 yamt lwp_unlock(l);
833 1.122 thorpej }
834 1.110 briggs
835 1.188 yamt KASSERT(l == curlwp);
836 1.188 yamt KASSERT(l->l_stat == LSONPROC);
837 1.188 yamt
838 1.180 dsl SYSCALL_TIME_WAKEUP(l);
839 1.188 yamt LOCKDEBUG_BARRIER(NULL, 1);
840 1.26 cgd }
841 1.26 cgd
842 1.26 cgd /*
843 1.271 rmind * setrunnable: change LWP state to be runnable, placing it on the run queue.
844 1.174 ad *
845 1.174 ad * Call with the process and LWP locked. Will return with the LWP unlocked.
846 1.26 cgd */
847 1.26 cgd void
848 1.122 thorpej setrunnable(struct lwp *l)
849 1.26 cgd {
850 1.122 thorpej struct proc *p = l->l_proc;
851 1.205 ad struct cpu_info *ci;
852 1.326 ad kmutex_t *oldlock;
853 1.26 cgd
854 1.188 yamt KASSERT((l->l_flag & LW_IDLE) == 0);
855 1.324 kamil KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
856 1.229 ad KASSERT(mutex_owned(p->p_lock));
857 1.183 ad KASSERT(lwp_locked(l, NULL));
858 1.205 ad KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
859 1.83 thorpej
860 1.122 thorpej switch (l->l_stat) {
861 1.122 thorpej case LSSTOP:
862 1.33 mycroft /*
863 1.33 mycroft * If we're being traced (possibly because someone attached us
864 1.33 mycroft * while we were stopped), check for a signal from the debugger.
865 1.33 mycroft */
866 1.310 christos if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xsig != 0)
867 1.174 ad signotify(l);
868 1.174 ad p->p_nrlwps++;
869 1.26 cgd break;
870 1.174 ad case LSSUSPENDED:
871 1.326 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
872 1.178 pavel l->l_flag &= ~LW_WSUSPEND;
873 1.174 ad p->p_nrlwps++;
874 1.192 rmind cv_broadcast(&p->p_lwpcv);
875 1.122 thorpej break;
876 1.174 ad case LSSLEEP:
877 1.174 ad KASSERT(l->l_wchan != NULL);
878 1.26 cgd break;
879 1.326 ad case LSIDL:
880 1.326 ad KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
881 1.326 ad break;
882 1.174 ad default:
883 1.174 ad panic("setrunnable: lwp %p state was %d", l, l->l_stat);
884 1.26 cgd }
885 1.139 cl
886 1.174 ad /*
887 1.286 pooka * If the LWP was sleeping, start it again.
888 1.174 ad */
889 1.174 ad if (l->l_wchan != NULL) {
890 1.174 ad l->l_stat = LSSLEEP;
891 1.183 ad /* lwp_unsleep() will release the lock. */
892 1.221 ad lwp_unsleep(l, true);
893 1.174 ad return;
894 1.174 ad }
895 1.139 cl
896 1.174 ad /*
897 1.174 ad * If the LWP is still on the CPU, mark it as LSONPROC. It may be
898 1.174 ad * about to call mi_switch(), in which case it will yield.
899 1.174 ad */
900 1.339 ad if ((l->l_pflag & LP_RUNNING) != 0) {
901 1.174 ad l->l_stat = LSONPROC;
902 1.174 ad l->l_slptime = 0;
903 1.174 ad lwp_unlock(l);
904 1.174 ad return;
905 1.174 ad }
906 1.122 thorpej
907 1.174 ad /*
908 1.205 ad * Look for a CPU to run on.
909 1.205 ad * Set the LWP runnable.
910 1.174 ad */
911 1.205 ad ci = sched_takecpu(l);
912 1.205 ad l->l_cpu = ci;
913 1.236 ad spc_lock(ci);
914 1.326 ad oldlock = lwp_setlock(l, l->l_cpu->ci_schedstate.spc_mutex);
915 1.188 yamt sched_setrunnable(l);
916 1.174 ad l->l_stat = LSRUN;
917 1.122 thorpej l->l_slptime = 0;
918 1.326 ad sched_enqueue(l);
919 1.326 ad sched_resched_lwp(l, true);
920 1.326 ad /* SPC & LWP now unlocked. */
921 1.326 ad mutex_spin_exit(oldlock);
922 1.26 cgd }
923 1.26 cgd
924 1.26 cgd /*
925 1.174 ad * suspendsched:
926 1.174 ad *
927 1.266 yamt * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
928 1.174 ad */
929 1.94 bouyer void
930 1.174 ad suspendsched(void)
931 1.94 bouyer {
932 1.174 ad CPU_INFO_ITERATOR cii;
933 1.174 ad struct cpu_info *ci;
934 1.122 thorpej struct lwp *l;
935 1.174 ad struct proc *p;
936 1.94 bouyer
937 1.94 bouyer /*
938 1.174 ad * We do this by process in order not to violate the locking rules.
939 1.94 bouyer */
940 1.228 ad mutex_enter(proc_lock);
941 1.174 ad PROCLIST_FOREACH(p, &allproc) {
942 1.229 ad mutex_enter(p->p_lock);
943 1.178 pavel if ((p->p_flag & PK_SYSTEM) != 0) {
944 1.229 ad mutex_exit(p->p_lock);
945 1.94 bouyer continue;
946 1.174 ad }
947 1.174 ad
948 1.309 pgoyette if (p->p_stat != SSTOP) {
949 1.309 pgoyette if (p->p_stat != SZOMB && p->p_stat != SDEAD) {
950 1.309 pgoyette p->p_pptr->p_nstopchild++;
951 1.309 pgoyette p->p_waited = 0;
952 1.309 pgoyette }
953 1.309 pgoyette p->p_stat = SSTOP;
954 1.309 pgoyette }
955 1.174 ad
956 1.174 ad LIST_FOREACH(l, &p->p_lwps, l_sibling) {
957 1.174 ad if (l == curlwp)
958 1.174 ad continue;
959 1.174 ad
960 1.174 ad lwp_lock(l);
961 1.122 thorpej
962 1.97 enami /*
963 1.174 ad * Set LW_WREBOOT so that the LWP will suspend itself
964 1.174 ad * when it tries to return to user mode.  We want to
965 1.174 ad * try to get as many LWPs as possible to
966 1.174 ad * the user / kernel boundary, so that they will
967 1.174 ad * release any locks that they hold.
968 1.97 enami */
969 1.178 pavel l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
970 1.174 ad
971 1.174 ad if (l->l_stat == LSSLEEP &&
972 1.178 pavel (l->l_flag & LW_SINTR) != 0) {
973 1.174 ad /* setrunnable() will release the lock. */
974 1.174 ad setrunnable(l);
975 1.174 ad continue;
976 1.174 ad }
977 1.174 ad
978 1.174 ad lwp_unlock(l);
979 1.94 bouyer }
980 1.174 ad
981 1.229 ad mutex_exit(p->p_lock);
982 1.94 bouyer }
983 1.228 ad mutex_exit(proc_lock);
984 1.174 ad
985 1.174 ad /*
986 1.174 ad * Kick all CPUs to make them preempt any LWPs running in user mode.
987 1.326 ad * They'll trap into the kernel and suspend themselves in userret().
988 1.326 ad *
989 1.326 ad * Unusually, we don't hold any other scheduler object locked, which
990 1.326 ad * would keep preemption off for sched_resched_cpu(), so disable it
991 1.326 ad * explicitly.
992 1.174 ad */
993 1.326 ad kpreempt_disable();
994 1.204 ad for (CPU_INFO_FOREACH(cii, ci)) {
995 1.204 ad spc_lock(ci);
996 1.326 ad sched_resched_cpu(ci, PRI_KERNEL, true);
997 1.326 ad /* spc now unlocked */
998 1.204 ad }
999 1.326 ad kpreempt_enable();
1000 1.174 ad }
1001 1.174 ad
1002 1.174 ad /*
1003 1.174 ad * sched_unsleep:
1004 1.174 ad *
1005 1.174 ad * This is called when the LWP has not been awoken normally but instead
1006 1.174 ad * interrupted: for example, if the sleep timed out. Because of this,
1007 1.174 ad * it's not a valid action for running or idle LWPs.
1008 1.174 ad */
1009 1.271 rmind static void
1010 1.221 ad sched_unsleep(struct lwp *l, bool cleanup)
1011 1.174 ad {
1012 1.174 ad
1013 1.174 ad lwp_unlock(l);
1014 1.174 ad panic("sched_unsleep");
1015 1.174 ad }
1016 1.174 ad
1017 1.250 rmind static void
1018 1.326 ad sched_changepri(struct lwp *l, pri_t pri)
1019 1.188 yamt {
1020 1.326 ad struct schedstate_percpu *spc;
1021 1.326 ad struct cpu_info *ci;
1022 1.188 yamt
1023 1.250 rmind KASSERT(lwp_locked(l, NULL));
1024 1.188 yamt
1025 1.326 ad ci = l->l_cpu;
1026 1.326 ad spc = &ci->ci_schedstate;
1027 1.174 ad
1028 1.271 rmind if (l->l_stat == LSRUN) {
1029 1.326 ad KASSERT(lwp_locked(l, spc->spc_mutex));
1030 1.204 ad sched_dequeue(l);
1031 1.204 ad l->l_priority = pri;
1032 1.326 ad sched_enqueue(l);
1033 1.326 ad sched_resched_lwp(l, false);
1034 1.326 ad } else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) {
1035 1.326 ad /* On priority drop, only evict realtime LWPs. */
1036 1.326 ad KASSERT(lwp_locked(l, spc->spc_lwplock));
1037 1.326 ad l->l_priority = pri;
1038 1.326 ad spc_lock(ci);
1039 1.326 ad sched_resched_cpu(ci, spc->spc_maxpriority, true);
1040 1.326 ad /* spc now unlocked */
1041 1.204 ad } else {
1042 1.174 ad l->l_priority = pri;
1043 1.157 yamt }
1044 1.184 yamt }
1045 1.184 yamt
1046 1.188 yamt static void
1047 1.185 yamt sched_lendpri(struct lwp *l, pri_t pri)
1048 1.184 yamt {
1049 1.326 ad struct schedstate_percpu *spc;
1050 1.326 ad struct cpu_info *ci;
1051 1.184 yamt
1052 1.188 yamt KASSERT(lwp_locked(l, NULL));
1053 1.184 yamt
1054 1.326 ad ci = l->l_cpu;
1055 1.326 ad spc = &ci->ci_schedstate;
1056 1.326 ad
1057 1.271 rmind if (l->l_stat == LSRUN) {
1058 1.326 ad KASSERT(lwp_locked(l, spc->spc_mutex));
1059 1.204 ad sched_dequeue(l);
1060 1.204 ad l->l_inheritedprio = pri;
1061 1.311 christos l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
1062 1.326 ad sched_enqueue(l);
1063 1.326 ad sched_resched_lwp(l, false);
1064 1.326 ad } else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) {
1065 1.326 ad /* On priority drop, only evict realtime LWPs. */
1066 1.326 ad KASSERT(lwp_locked(l, spc->spc_lwplock));
1067 1.326 ad l->l_inheritedprio = pri;
1068 1.326 ad l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
1069 1.326 ad spc_lock(ci);
1070 1.326 ad sched_resched_cpu(ci, spc->spc_maxpriority, true);
1071 1.326 ad /* spc now unlocked */
1072 1.204 ad } else {
1073 1.184 yamt l->l_inheritedprio = pri;
1074 1.311 christos l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
1075 1.184 yamt }
1076 1.184 yamt }
1077 1.184 yamt
1078 1.184 yamt struct lwp *
1079 1.184 yamt syncobj_noowner(wchan_t wchan)
1080 1.184 yamt {
1081 1.184 yamt
1082 1.184 yamt return NULL;
1083 1.151 yamt }
1084 1.151 yamt
1085 1.250 rmind /* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
1086 1.281 rmind const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
1087 1.281 rmind
1088 1.281 rmind /*
1089 1.281 rmind * Constants for averages over 1, 5 and 15 minutes when sampling at
1090 1.281 rmind * 5 second intervals.
1091 1.281 rmind */
1092 1.281 rmind static const fixpt_t cexp[ ] = {
1093 1.281 rmind 0.9200444146293232 * FSCALE, /* exp(-1/12) */
1094 1.281 rmind 0.9834714538216174 * FSCALE, /* exp(-1/60) */
1095 1.281 rmind 0.9944598480048967 * FSCALE, /* exp(-1/180) */
1096 1.281 rmind };
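
/*
 * Worked illustration of the two decay filters above (derivation only).
 * sched_pstats() runs once per second, so after 60 ticks l_pctcpu and
 * p_pctcpu retain ccpu^60 = exp(-60/20) = exp(-3), roughly 5% of their
 * old value, i.e. 95% has decayed.  The load averages use time
 * constants of 60, 300 and 900 seconds sampled every 5 seconds, and
 * each sample applies
 *
 *	avg = avg * cexp[i] + nrun * (1 - cexp[i])
 *
 * which the fixed-point code below computes as
 *
 *	avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
 *	    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
 */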
1097 1.134 matt
1098 1.134 matt /*
1099 1.188 yamt * sched_pstats:
1100 1.188 yamt *
1101 1.281 rmind * => Update process statistics and check CPU resource allocation.
1102 1.281 rmind * => Call scheduler-specific hook to eventually adjust LWP priorities.
1103 1.281 rmind * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
1104 1.130 nathanw */
1105 1.113 gmcgarry void
1106 1.281 rmind sched_pstats(void)
1107 1.113 gmcgarry {
1108 1.281 rmind extern struct loadavg averunnable;
1109 1.281 rmind struct loadavg *avg = &averunnable;
1110 1.249 rmind const int clkhz = (stathz != 0 ? stathz : hz);
1111 1.281 rmind static bool backwards = false;
1112 1.281 rmind static u_int lavg_count = 0;
1113 1.188 yamt struct proc *p;
1114 1.281 rmind int nrun;
1115 1.113 gmcgarry
1116 1.188 yamt sched_pstats_ticks++;
1117 1.281 rmind if (++lavg_count >= 5) {
1118 1.281 rmind lavg_count = 0;
1119 1.281 rmind nrun = 0;
1120 1.281 rmind }
1121 1.228 ad mutex_enter(proc_lock);
1122 1.188 yamt PROCLIST_FOREACH(p, &allproc) {
1123 1.281 rmind struct lwp *l;
1124 1.281 rmind struct rlimit *rlim;
1125 1.296 dholland time_t runtm;
1126 1.281 rmind int sig;
1127 1.281 rmind
1128 1.271 rmind /* Increment sleep time (if sleeping), ignore overflow. */
1129 1.229 ad mutex_enter(p->p_lock);
1130 1.212 yamt runtm = p->p_rtime.sec;
1131 1.188 yamt LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1132 1.281 rmind fixpt_t lpctcpu;
1133 1.281 rmind u_int lcpticks;
1134 1.281 rmind
1135 1.249 rmind if (__predict_false((l->l_flag & LW_IDLE) != 0))
1136 1.188 yamt continue;
1137 1.188 yamt lwp_lock(l);
1138 1.212 yamt runtm += l->l_rtime.sec;
1139 1.188 yamt l->l_swtime++;
1140 1.242 rmind sched_lwp_stats(l);
1141 1.281 rmind
1142 1.281 rmind /* For load average calculation. */
1143 1.282 rmind if (__predict_false(lavg_count == 0) &&
1144 1.282 rmind (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
1145 1.281 rmind switch (l->l_stat) {
1146 1.281 rmind case LSSLEEP:
1147 1.281 rmind if (l->l_slptime > 1) {
1148 1.281 rmind break;
1149 1.281 rmind }
1150 1.323 mrg /* FALLTHROUGH */
1151 1.281 rmind case LSRUN:
1152 1.281 rmind case LSONPROC:
1153 1.281 rmind case LSIDL:
1154 1.281 rmind nrun++;
1155 1.281 rmind }
1156 1.281 rmind }
1157 1.282 rmind lwp_unlock(l);
1158 1.282 rmind
1159 1.282 rmind l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
1160 1.282 rmind if (l->l_slptime != 0)
1161 1.282 rmind continue;
1162 1.282 rmind
1163 1.282 rmind lpctcpu = l->l_pctcpu;
1164 1.282 rmind lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
1165 1.282 rmind lpctcpu += ((FSCALE - ccpu) *
1166 1.282 rmind (lcpticks * FSCALE / clkhz)) >> FSHIFT;
1167 1.282 rmind l->l_pctcpu = lpctcpu;
1168 1.188 yamt }
1169 1.249 rmind /* Calculate p_pctcpu only for ps(1). */
1170 1.188 yamt p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
1171 1.174 ad
1172 1.303 christos if (__predict_false(runtm < 0)) {
1173 1.303 christos if (!backwards) {
1174 1.303 christos backwards = true;
1175 1.303 christos printf("WARNING: negative runtime; "
1176 1.303 christos "monotonic clock has gone backwards\n");
1177 1.303 christos }
1178 1.303 christos mutex_exit(p->p_lock);
1179 1.303 christos continue;
1180 1.303 christos }
1181 1.303 christos
1182 1.188 yamt /*
1183 1.188 yamt * Check if the process exceeds its CPU resource allocation.
1184 1.293 apb * If over the hard limit, kill it with SIGKILL.
1185 1.293 apb * If over the soft limit, send SIGXCPU and raise
1186 1.293 apb * the soft limit a little.
1187 1.188 yamt */
1188 1.188 yamt rlim = &p->p_rlimit[RLIMIT_CPU];
1189 1.188 yamt sig = 0;
1190 1.249 rmind if (__predict_false(runtm >= rlim->rlim_cur)) {
1191 1.293 apb if (runtm >= rlim->rlim_max) {
1192 1.188 yamt sig = SIGKILL;
1193 1.312 christos log(LOG_NOTICE,
1194 1.312 christos "pid %d, command %s, is killed: %s\n",
1195 1.312 christos p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
1196 1.293 apb uprintf("pid %d, command %s, is killed: %s\n",
1197 1.312 christos p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
1198 1.293 apb } else {
1199 1.188 yamt sig = SIGXCPU;
1200 1.188 yamt if (rlim->rlim_cur < rlim->rlim_max)
1201 1.188 yamt rlim->rlim_cur += 5;
1202 1.188 yamt }
1203 1.188 yamt }
1204 1.229 ad mutex_exit(p->p_lock);
1205 1.303 christos if (__predict_false(sig)) {
1206 1.259 rmind KASSERT((p->p_flag & PK_SYSTEM) == 0);
1207 1.188 yamt psignal(p, sig);
1208 1.259 rmind }
1209 1.174 ad }
1210 1.281 rmind
1211 1.281 rmind /* Load average calculation. */
1212 1.281 rmind if (__predict_false(lavg_count == 0)) {
1213 1.281 rmind int i;
1214 1.283 martin CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
1215 1.281 rmind for (i = 0; i < __arraycount(cexp); i++) {
1216 1.281 rmind avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
1217 1.281 rmind nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
1218 1.281 rmind }
1219 1.281 rmind }
1220 1.281 rmind
1221 1.281 rmind /* Lightning bolt. */
1222 1.273 pooka cv_broadcast(&lbolt);
1223 1.325 ad
1224 1.325 ad mutex_exit(proc_lock);
1225 1.113 gmcgarry }