/*	$NetBSD: kern_synch.c,v 1.359 2023/09/23 18:48:04 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020, 2023
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.359 2023/09/23 18:48:04 ad Exp $");

#include "opt_kstack.h"
#include "opt_ddb.h"
#include "opt_dtrace.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <dev/lockstat.h>

#include <sys/dtrace_bsd.h>
int dtrace_vtime_active=0;
dtrace_vtime_switch_func_t dtrace_vtime_switch_func;

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	sched_unsleep(struct lwp *, bool);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	.sobj_name = "sleep",
	.sobj_flag = SOBJ_SLEEPQ_SORTED,
	.sobj_boostpri = PRI_KERNEL,
	.sobj_unsleep = sleepq_unsleep,
	.sobj_changepri = sleepq_changepri,
	.sobj_lendpri = sleepq_lendpri,
	.sobj_owner = syncobj_noowner,
};

syncobj_t sched_syncobj = {
	.sobj_name = "sched",
	.sobj_flag = SOBJ_SLEEPQ_SORTED,
	.sobj_boostpri = PRI_USER,
	.sobj_unsleep = sched_unsleep,
	.sobj_changepri = sched_changepri,
	.sobj_lendpri = sched_lendpri,
	.sobj_owner = syncobj_noowner,
};

syncobj_t kpause_syncobj = {
	.sobj_name = "kpause",
	.sobj_flag = SOBJ_SLEEPQ_NULL,
	.sobj_boostpri = PRI_KERNEL,
	.sobj_unsleep = sleepq_unsleep,
	.sobj_changepri = sleepq_changepri,
	.sobj_lendpri = sleepq_lendpri,
	.sobj_owner = syncobj_noowner,
};

/* "Lightning bolt": once a second sleep address. */
kcondvar_t lbolt __cacheline_aligned;

u_int sched_pstats_ticks __cacheline_aligned;

/* Preemption event counters. */
static struct evcnt kpreempt_ev_crit __cacheline_aligned;
static struct evcnt kpreempt_ev_klock __cacheline_aligned;
static struct evcnt kpreempt_ev_immed __cacheline_aligned;

void
synch_init(void)
{

	cv_init(&lbolt, "lbolt");

	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "defer: critical section");
	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "defer: kernel_lock");
	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "immediate");
}

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current LWP until a wakeup is
 * performed on the specified identifier.  The LWP will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 */
int
tsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	bool catch_p;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);
	//KASSERT(KERNEL_LOCKED_P());

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		return 0;
	}

	catch_p = priority & PCATCH;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj, catch_p);
	return sleepq_block(timo, catch_p, &sleep_syncobj);
}
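
/*
 * Illustrative only (editorial addition, not part of the original file):
 * a typical legacy caller sleeps on the address of the condition it is
 * waiting for and re-checks that condition after waking, since a wakeup
 * may be spurious or arrive for another waiter.  The softc field and
 * wmesg below are made-up placeholders:
 *
 *	while (sc->sc_busy)
 *		(void)tsleep(&sc->sc_busy, PWAIT, "scbusy", 0);
 *
 * New code should prefer condition variables (cv_wait()/cv_broadcast())
 * over this obsolete interface.
 */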

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	bool catch_p;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	catch_p = priority & PCATCH;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj, catch_p);
	mutex_exit(mtx);
	error = sleepq_block(timo, catch_p, &sleep_syncobj);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	int error;

	KASSERT(timo != 0 || intr);

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	lwp_lock(l);
	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
	sleepq_enqueue(NULL, l, wmesg, &kpause_syncobj, intr);
	error = sleepq_block(timo, intr, &kpause_syncobj);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
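
/*
 * Illustrative only (editorial addition): kpause() is the usual way to
 * sleep for a fixed interval when no wakeup() is expected, for example a
 * driver backing off for roughly 10ms.  The wmesg and interval here are
 * arbitrary:
 *
 *	(void)kpause("backoff", false, mstohz(10), NULL);
 *
 * With a non-NULL mutex argument the lock is dropped across the sleep
 * and reacquired before returning.
 */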

/*
 * OBSOLETE INTERFACE
 *
 * Make all LWPs sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, (u_int)-1, mp);
}

/*
 * General yield call.  Puts the current LWP back on its run queue and
 * performs a context switch.
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);

	spc_lock(l->l_cpu);
	mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.  Different from yield()
 * in that:
 *
 * - It's counted differently (involuntary vs. voluntary).
 * - Realtime threads go to the head of their runqueue vs. tail for yield().
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);

	spc_lock(l->l_cpu);
	l->l_pflag |= LP_PREEMPTING;
	mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Return true if the current LWP should yield the processor.  Intended to
 * be used by long-running code in the kernel.
 */
inline bool
preempt_needed(void)
{
	lwp_t *l = curlwp;
	int needed;

	KPREEMPT_DISABLE(l);
	needed = l->l_cpu->ci_want_resched;
	KPREEMPT_ENABLE(l);

	return (needed != 0);
}

/*
 * A breathing point for long-running code in the kernel.
 */
void
preempt_point(void)
{

	if (__predict_false(preempt_needed())) {
		preempt();
	}
}
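
/*
 * Illustrative only (editorial addition): long-running loops in the
 * kernel are expected to offer a breathing point on each iteration so
 * that a pending preemption request is honoured promptly.
 * do_some_work() and huge_count below are placeholders:
 *
 *	for (i = 0; i < huge_count; i++) {
 *		do_some_work(i);
 *		preempt_point();
 *	}
 */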

/*
 * Handle a request made by another agent to preempt the current LWP
 * in-kernel.  Usually called when l_dopreempt may be non-zero.
 *
 * Character addresses for lockstat only.
 */
static char kpreempt_is_disabled;
static char kernel_lock_held;
static char is_softint_lwp;
static char spl_is_raised;

bool
kpreempt(uintptr_t where)
{
	uintptr_t failed;
	lwp_t *l;
	int s, dop, lsflag;

	l = curlwp;
	failed = 0;
	while ((dop = l->l_dopreempt) != 0) {
		if (l->l_stat != LSONPROC) {
			/*
			 * About to block (or die), let it happen.
			 * Doesn't really count as "preemption has
			 * been blocked", since we're going to
			 * context switch.
			 */
			atomic_swap_uint(&l->l_dopreempt, 0);
			return true;
		}
		KASSERT((l->l_flag & LW_IDLE) == 0);
		if (__predict_false(l->l_nopreempt != 0)) {
			/* LWP holds preemption disabled, explicitly. */
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_crit.ev_count++;
			}
			failed = (uintptr_t)&kpreempt_is_disabled;
			break;
		}
		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
			/* Can't preempt soft interrupts yet. */
			atomic_swap_uint(&l->l_dopreempt, 0);
			failed = (uintptr_t)&is_softint_lwp;
			break;
		}
		s = splsched();
		if (__predict_false(l->l_blcnt != 0 ||
		    curcpu()->ci_biglock_wanted != NULL)) {
			/* Hold or want kernel_lock, code is not MT safe. */
			splx(s);
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_klock.ev_count++;
			}
			failed = (uintptr_t)&kernel_lock_held;
			break;
		}
		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
			/*
			 * It may be that the IPL is too high.
			 * cpu_kpreempt_enter() can schedule an
			 * interrupt to retry later.
			 */
			splx(s);
			failed = (uintptr_t)&spl_is_raised;
			break;
		}
		/* Do it! */
		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
			kpreempt_ev_immed.ev_count++;
		}
		lwp_lock(l);
		l->l_pflag |= LP_PREEMPTING;
		spc_lock(l->l_cpu);
		mi_switch(l);
		l->l_nopreempt++;
		splx(s);

		/* Take care of any MD cleanup. */
		cpu_kpreempt_exit(where);
		l->l_nopreempt--;
	}

	if (__predict_true(!failed)) {
		return false;
	}

	/* Record preemption failure for reporting via lockstat. */
	atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
	lsflag = 0;
	LOCKSTAT_ENTER(lsflag);
	if (__predict_false(lsflag)) {
		if (where == 0) {
			where = (uintptr_t)__builtin_return_address(0);
		}
		/* Preemption is on, might recurse, so make it atomic. */
		if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
		    (void *)where) == NULL) {
			LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
			l->l_pfaillock = failed;
		}
	}
	LOCKSTAT_EXIT(lsflag);
	return true;
}

/*
 * Return true if preemption is explicitly disabled.
 */
bool
kpreempt_disabled(void)
{
	const lwp_t *l = curlwp;

	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
	    (l->l_flag & LW_IDLE) != 0 || (l->l_pflag & LP_INTR) != 0 ||
	    cpu_kpreempt_disabled();
}

/*
 * Disable kernel preemption.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

/*
 * Reenable kernel preemption.
 */
void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}
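
/*
 * Illustrative only (editorial addition): code that must stay on the
 * current CPU, for example while dereferencing curcpu() or per-CPU data,
 * brackets the access with these calls.  percpu_foo() is a placeholder:
 *
 *	kpreempt_disable();
 *	percpu_foo(curcpu());
 *	kpreempt_enable();
 */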

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{
	static bool backwards = false;

	if (__predict_false(l->l_flag & LW_IDLE))
		return;

	if (__predict_false(bintimecmp(now, &l->l_stime, <)) && !backwards) {
		char caller[128];

#ifdef DDB
		db_symstr(caller, sizeof(caller),
		    (db_expr_t)(intptr_t)__builtin_return_address(0),
		    DB_STGY_PROC);
#else
		snprintf(caller, sizeof(caller), "%p",
		    __builtin_return_address(0));
#endif
		backwards = true;
		printf("WARNING: lwp %ld (%s%s%s) flags 0x%x:"
		    " timecounter went backwards"
		    " from (%jd + 0x%016"PRIx64"/2^64) sec"
		    " to (%jd + 0x%016"PRIx64"/2^64) sec"
		    " in %s\n",
		    (long)l->l_lid,
		    l->l_proc->p_comm,
		    l->l_name ? " " : "",
		    l->l_name ? l->l_name : "",
		    l->l_pflag,
		    (intmax_t)l->l_stime.sec, l->l_stime.frac,
		    (intmax_t)now->sec, now->frac,
		    caller);
	}

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}

/*
 * Select the next LWP for the current CPU to run.
 */
static inline lwp_t *
nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
{
	lwp_t *newl;

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * On arrival here LWPs on a run queue are locked by spc_mutex which
	 * is currently held.  Idle LWPs are always locked by spc_lwplock,
	 * which may or may not be held here.  On exit from this code block,
	 * in all cases newl is locked by spc_lwplock.
	 */
	newl = sched_nextlwp();
	if (newl != NULL) {
		sched_dequeue(newl);
		KASSERT(lwp_locked(newl, spc->spc_mutex));
		KASSERT(newl->l_cpu == ci);
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
		spc->spc_curpriority = lwp_eprio(newl);
		spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
		lwp_setlock(newl, spc->spc_lwplock);
	} else {
		/*
		 * The idle LWP does not get set to LSONPROC, because
		 * otherwise it screws up the output from top(1) etc.
		 */
		newl = ci->ci_data.cpu_idlelwp;
		newl->l_pflag |= LP_RUNNING;
		spc->spc_curpriority = PRI_IDLE;
		spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
		    SPCF_IDLE;
	}

	/*
	 * Only clear want_resched if there are no pending (slow) software
	 * interrupts.  We can do this without an atomic, because no new
	 * LWPs can appear in the queue due to our hold on spc_mutex, and
	 * the update to ci_want_resched will become globally visible before
	 * the release of spc_mutex becomes globally visible.
	 */
	if (ci->ci_data.cpu_softints == 0)
		ci->ci_want_resched = 0;

	return newl;
}

/*
 * The machine independent parts of context switch.
 *
 * NOTE: l->l_cpu is not changed in this routine, because an LWP never
 * changes its own l_cpu (that would screw up curcpu on many ports and could
 * cause all kinds of other evil stuff).  l_cpu is always changed by some
 * other actor, when it's known the LWP is not running (the LP_RUNNING flag
 * is checked under lock).
 */
void
mi_switch(lwp_t *l)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;
	struct lwp *newl;
	kmutex_t *lock;
	int oldspl;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(kpreempt_disabled());
	KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
	KASSERTMSG(l->l_blcnt == 0, "kernel_lock leaked");

	kstack_check_magic(l);

	binuptime(&bt);

	KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
	ci = curcpu();
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_pflag & LP_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		KASSERT((l->l_flag & LW_IDLE) == 0);
		l->l_stat = LSRUN;
		lwp_setlock(l, spc->spc_mutex);
		sched_enqueue(l);
		sched_preempted(l);

		/*
		 * Handle migration.  Note that "migrating LWP" may
		 * be reset here, if interrupt/preemption happens
		 * early in idle LWP.
		 */
		if (l->l_target_cpu != NULL && (l->l_pflag & LP_BOUND) == 0) {
			KASSERT((l->l_pflag & LP_INTR) == 0);
			spc->spc_migrating = l;
		}
	}

	/* Pick new LWP to run. */
	if (newl == NULL) {
		newl = nextlwp(ci, spc);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Count time spent in current system call */
		SYSCALL_TIME_SLEEP(l);

		updatertime(l, &bt);

		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use ci_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_onproc = newl;
	}

	/*
	 * Preemption related tasks.  Must be done holding spc_mutex.  Clear
	 * l_dopreempt without an atomic - it's only ever set non-zero by
	 * sched_resched_cpu() which also holds spc_mutex, and only ever
	 * cleared by the LWP itself (us) with atomics when not under lock.
	 */
	l->l_dopreempt = 0;
	if (__predict_false(l->l_pfailaddr != 0)) {
		LOCKSTAT_FLAG(lsflag);
		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
		    1, l->l_pfailtime, l->l_pfailaddr);
		LOCKSTAT_EXIT(lsflag);
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
		l->l_pfailaddr = 0;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/* We're down to only one lock, so do debug checks. */
		LOCKDEBUG_BARRIER(l->l_mutex, 1);

		/* Count the context switch. */
		CPU_COUNT(CPU_COUNT_NSWTCH, 1);
		l->l_ncsw++;
		if ((l->l_pflag & LP_PREEMPTING) != 0) {
			l->l_nivcsw++;
			l->l_pflag &= ~LP_PREEMPTING;
		}

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED after
		 * releasing the lock.
		 */
		KASSERTMSG(ci->ci_mtx_count == -1,
		    "%s: cpu%u: ci_mtx_count (%d) != -1 "
		    "(block with spin-mutex held)",
		    __func__, cpu_index(ci), ci->ci_mtx_count);
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count = -2;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL) {
			l->l_lwpctl->lc_curcpu = (l->l_stat == LSZOMB ?
			    LWPCTL_CPU_EXITED : LWPCTL_CPU_NONE);
		}

		/*
		 * If curlwp is a soft interrupt LWP, there's nobody on the
		 * other side to unlock - we're returning into an assembly
		 * trampoline.  Unlock now.  This is safe because this is a
		 * kernel LWP and is bound to current CPU: the worst anyone
		 * else will do to it, is to put it back onto this CPU's run
		 * queue (and the CPU is busy here right now!).
		 */
		if (returning) {
			/* Keep IPL_SCHED after this; MD code will fix up. */
			l->l_pflag &= ~LP_RUNNING;
			lwp_unlock(l);
		} else {
			/* A normal LWP: save old VM context. */
			pmap_deactivate(l);
		}

		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (__predict_false(dtrace_vtime_active)) {
			(*dtrace_vtime_switch_func)(newl);
		}

		/*
		 * We must ensure not to come here from inside a read section.
		 */
		KASSERT(pserialize_not_in_read_section());

		/* Switch to the new LWP. */
#ifdef MULTIPROCESSOR
		KASSERT(curlwp == ci->ci_curlwp);
#endif
		KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();
#ifdef MULTIPROCESSOR
		KASSERT(curlwp == ci->ci_curlwp);
#endif
		KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p",
		    l, curlwp, prevlwp);
		KASSERT(prevlwp != NULL);
		KASSERT(l->l_cpu == ci);
		KASSERT(ci->ci_mtx_count == -2);

		/*
		 * Immediately mark the previous LWP as no longer running
		 * and unlock (to keep lock wait times as short as possible).
		 * We'll still be at IPL_SCHED afterwards.  If a zombie,
		 * don't touch after clearing LP_RUNNING as it could be
		 * reaped by another CPU.  Issue a memory barrier to ensure
		 * this.
		 *
		 * atomic_store_release matches atomic_load_acquire in
		 * lwp_free.
		 */
		KASSERT((prevlwp->l_pflag & LP_RUNNING) != 0);
		lock = prevlwp->l_mutex;
		if (__predict_false(prevlwp->l_stat == LSZOMB)) {
			atomic_store_release(&prevlwp->l_pflag,
			    prevlwp->l_pflag & ~LP_RUNNING);
		} else {
			prevlwp->l_pflag &= ~LP_RUNNING;
		}
		mutex_spin_exit(lock);

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		pcu_switchpoint(l);

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL) {
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
			l->l_lwpctl->lc_pctr++;
		}

		/*
		 * Normalize the spin mutex count and restore the previous
		 * SPL.  Note that, unless the caller disabled preemption,
		 * we can be preempted at any time after this splx().
		 */
		KASSERT(l->l_cpu == ci);
		KASSERT(ci->ci_mtx_count == -1);
		ci->ci_mtx_count = 0;
		splx(oldspl);
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		l->l_pflag &= ~LP_PREEMPTING;
		lwp_unlock(l);
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0);

	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);
}

/*
 * setrunnable: change LWP state to be runnable, placing it on the run queue.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;
	kmutex_t *oldlock;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
	KASSERT(mutex_owned(p->p_lock));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xsig != 0)
			signotify(l);
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	case LSIDL:
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping, start it again.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	spc_lock(ci);
	oldlock = lwp_setlock(l, l->l_cpu->ci_schedstate.spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;
	sched_enqueue(l);
	sched_resched_lwp(l, true);
	/* SPC & LWP now unlocked. */
	mutex_spin_exit(oldlock);
}

/*
 * suspendsched:
 *
 * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(p->p_lock);
		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(p->p_lock);
			continue;
		}

		if (p->p_stat != SSTOP) {
			if (p->p_stat != SZOMB && p->p_stat != SDEAD) {
				p->p_pptr->p_nstopchild++;
				p->p_waited = 0;
			}
			p->p_stat = SSTOP;
		}

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to
			 * the user / kernel boundary, so that they will
			 * release any locks that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(p->p_lock);
	}
	mutex_exit(&proc_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 *
	 * Unusually, we don't hold any other scheduler object locked, which
	 * would keep preemption off for sched_resched_cpu(), so disable it
	 * explicitly.
	 */
	kpreempt_disable();
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		sched_resched_cpu(ci, PRI_KERNEL, true);
		/* spc now unlocked */
	}
	kpreempt_enable();
}

/*
 * sched_unsleep:
 *
 * This is called when the LWP has not been awoken normally but instead
 * interrupted: for example, if the sleep timed out.  Because of this,
 * it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l, bool cleanup)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, spc->spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l);
		sched_resched_lwp(l, false);
	} else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) {
		/* On priority drop, only evict realtime LWPs. */
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		l->l_priority = pri;
		spc_lock(ci);
		sched_resched_cpu(ci, spc->spc_maxpriority, true);
		/* spc now unlocked */
	} else {
		l->l_priority = pri;
	}
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{
	struct schedstate_percpu *spc;
	struct cpu_info *ci;

	KASSERT(lwp_locked(l, NULL));

	ci = l->l_cpu;
	spc = &ci->ci_schedstate;

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, spc->spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
		sched_enqueue(l);
		sched_resched_lwp(l, false);
	} else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) {
		/* On priority drop, only evict realtime LWPs. */
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		l->l_inheritedprio = pri;
		l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
		spc_lock(ci);
		sched_resched_cpu(ci, spc->spc_maxpriority, true);
		/* spc now unlocked */
	} else {
		l->l_inheritedprio = pri;
		l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
	}
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
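
/*
 * Worked example (editorial addition, not in the original source):
 * sched_pstats() below multiplies p_pctcpu/l_pctcpu by ccpu roughly once
 * per second, so after 60 samples the old contribution has decayed by a
 * factor of exp(-60/20) = exp(-3) ~= 0.05.  In other words, about 95% of
 * the old estimate is gone after a minute, which is exactly what the
 * comment above states.
 */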

/*
 * Constants for averages over 1, 5 and 15 minutes when sampling at
 * 5 second intervals.
 */
static const fixpt_t cexp[ ] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
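
/*
 * Worked example (editorial addition, not in the original source): the
 * load average computed in sched_pstats() is an exponential moving
 * average updated every 5 seconds.  In unscaled terms it is
 *
 *	load = e * load + nrun * (1 - e)
 *
 * where e = cexp[i] / FSCALE = exp(-5/T) for decay times T of 60, 300
 * and 900 seconds, i.e. exp(-1/12), exp(-1/60) and exp(-1/180), which
 * are exactly the constants listed above.
 */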

/*
 * sched_pstats:
 *
 * => Update process statistics and check CPU resource allocation.
 * => Call scheduler-specific hook to eventually adjust LWP priorities.
 * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
 */
void
sched_pstats(void)
{
	struct loadavg *avg = &averunnable;
	const int clkhz = (stathz != 0 ? stathz : hz);
	static bool backwardslwp = false;
	static bool backwardsproc = false;
	static u_int lavg_count = 0;
	struct proc *p;
	int nrun;

	sched_pstats_ticks++;
	if (++lavg_count >= 5) {
		lavg_count = 0;
		nrun = 0;
	}
	mutex_enter(&proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		struct lwp *l;
		struct rlimit *rlim;
		time_t runtm;
		int sig;

		/* Increment sleep time (if sleeping), ignore overflow. */
		mutex_enter(p->p_lock);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			fixpt_t lpctcpu;
			u_int lcpticks;

			if (__predict_false((l->l_flag & LW_IDLE) != 0))
				continue;
			lwp_lock(l);
			if (__predict_false(l->l_rtime.sec < 0) &&
			    !backwardslwp) {
				backwardslwp = true;
				printf("WARNING: lwp %ld (%s%s%s): "
				    "negative runtime: "
				    "(%jd + 0x%016"PRIx64"/2^64) sec\n",
				    (long)l->l_lid,
				    l->l_proc->p_comm,
				    l->l_name ? " " : "",
				    l->l_name ? l->l_name : "",
				    (intmax_t)l->l_rtime.sec,
				    l->l_rtime.frac);
			}
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_lwp_stats(l);

			/* For load average calculation. */
			if (__predict_false(lavg_count == 0) &&
			    (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
				switch (l->l_stat) {
				case LSSLEEP:
					if (l->l_slptime > 1) {
						break;
					}
					/* FALLTHROUGH */
				case LSRUN:
				case LSONPROC:
				case LSIDL:
					nrun++;
				}
			}
			lwp_unlock(l);

			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime != 0)
				continue;

			lpctcpu = l->l_pctcpu;
			lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
			lpctcpu += ((FSCALE - ccpu) *
			    (lcpticks * FSCALE / clkhz)) >> FSHIFT;
			l->l_pctcpu = lpctcpu;
		}
		/* Calculating p_pctcpu only for ps(1) */
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		if (__predict_false(runtm < 0)) {
			if (!backwardsproc) {
				backwardsproc = true;
				printf("WARNING: pid %ld (%s): "
				    "negative runtime; "
				    "monotonic clock has gone backwards\n",
				    (long)p->p_pid, p->p_comm);
			}
			mutex_exit(p->p_lock);
			continue;
		}

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over the hard limit, kill it with SIGKILL.
		 * If over the soft limit, send SIGXCPU and raise
		 * the soft limit a little.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (__predict_false(runtm >= rlim->rlim_cur)) {
			if (runtm >= rlim->rlim_max) {
				sig = SIGKILL;
				log(LOG_NOTICE,
				    "pid %d, command %s, is killed: %s\n",
				    p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
				uprintf("pid %d, command %s, is killed: %s\n",
				    p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
			} else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(p->p_lock);
		if (__predict_false(sig)) {
			KASSERT((p->p_flag & PK_SYSTEM) == 0);
			psignal(p, sig);
		}
	}

	/* Load average calculation. */
	if (__predict_false(lavg_count == 0)) {
		int i;
		CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
		for (i = 0; i < __arraycount(cexp); i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/* Lightning bolt. */
	cv_broadcast(&lbolt);

	mutex_exit(&proc_lock);
}