     1 /*	$NetBSD: kern_synch.c,v 1.339 2020/02/15 18:12:15 ad Exp $	*/
2
3 /*-
4 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019
5 * The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
11 * Daniel Sieger.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 /*-
36 * Copyright (c) 1982, 1986, 1990, 1991, 1993
37 * The Regents of the University of California. All rights reserved.
38 * (c) UNIX System Laboratories, Inc.
39 * All or some portions of this file are derived from material licensed
40 * to the University of California by American Telephone and Telegraph
41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
42 * the permission of UNIX System Laboratories, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.339 2020/02/15 18:12:15 ad Exp $");
73
74 #include "opt_kstack.h"
75 #include "opt_dtrace.h"
76
77 #define __MUTEX_PRIVATE
78
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/proc.h>
82 #include <sys/kernel.h>
83 #include <sys/cpu.h>
84 #include <sys/pserialize.h>
85 #include <sys/resourcevar.h>
86 #include <sys/sched.h>
87 #include <sys/syscall_stats.h>
88 #include <sys/sleepq.h>
89 #include <sys/lockdebug.h>
90 #include <sys/evcnt.h>
91 #include <sys/intr.h>
92 #include <sys/lwpctl.h>
93 #include <sys/atomic.h>
94 #include <sys/syslog.h>
95
96 #include <uvm/uvm_extern.h>
97
98 #include <dev/lockstat.h>
99
100 #include <sys/dtrace_bsd.h>
101 int dtrace_vtime_active=0;
102 dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
103
104 static void sched_unsleep(struct lwp *, bool);
105 static void sched_changepri(struct lwp *, pri_t);
106 static void sched_lendpri(struct lwp *, pri_t);
107
108 syncobj_t sleep_syncobj = {
109 .sobj_flag = SOBJ_SLEEPQ_SORTED,
110 .sobj_unsleep = sleepq_unsleep,
111 .sobj_changepri = sleepq_changepri,
112 .sobj_lendpri = sleepq_lendpri,
113 .sobj_owner = syncobj_noowner,
114 };
115
116 syncobj_t sched_syncobj = {
117 .sobj_flag = SOBJ_SLEEPQ_SORTED,
118 .sobj_unsleep = sched_unsleep,
119 .sobj_changepri = sched_changepri,
120 .sobj_lendpri = sched_lendpri,
121 .sobj_owner = syncobj_noowner,
122 };
123
124 /* "Lightning bolt": once a second sleep address. */
125 kcondvar_t lbolt __cacheline_aligned;
126
127 u_int sched_pstats_ticks __cacheline_aligned;
128
129 /* Preemption event counters. */
130 static struct evcnt kpreempt_ev_crit __cacheline_aligned;
131 static struct evcnt kpreempt_ev_klock __cacheline_aligned;
132 static struct evcnt kpreempt_ev_immed __cacheline_aligned;
133
134 void
135 synch_init(void)
136 {
137
138 cv_init(&lbolt, "lbolt");
139
140 evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
141 "kpreempt", "defer: critical section");
142 evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
143 "kpreempt", "defer: kernel_lock");
144 evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
145 "kpreempt", "immediate");
146 }
147
148 /*
149 * OBSOLETE INTERFACE
150 *
151 * General sleep call. Suspends the current LWP until a wakeup is
152 * performed on the specified identifier. The LWP will then be made
153 * runnable with the specified priority. Sleeps at most timo/hz seconds (0
  154  * means no timeout).  If pri includes the PCATCH flag, signals are checked
  155  * before and after sleeping; otherwise they are not.  Returns 0 if
  156  * awakened, or EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
  157  * signal needs to be delivered, ERESTART is returned if the current system
  158  * call should be restarted if possible, and EINTR is returned if the system
  159  * call should be interrupted by the signal.
160 */
161 int
162 tsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo)
163 {
164 struct lwp *l = curlwp;
165 sleepq_t *sq;
166 kmutex_t *mp;
167
168 KASSERT((l->l_pflag & LP_INTR) == 0);
169 KASSERT(ident != &lbolt);
170
171 if (sleepq_dontsleep(l)) {
172 (void)sleepq_abort(NULL, 0);
173 return 0;
174 }
175
176 l->l_kpriority = true;
177 sq = sleeptab_lookup(&sleeptab, ident, &mp);
178 sleepq_enter(sq, l, mp);
179 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
180 return sleepq_block(timo, priority & PCATCH);
181 }
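/*
 * Illustrative sketch only: the classic tsleep()/wakeup() idiom that the
 * interface above supports.  The softc, its sc_busy flag and the "btsy"
 * wait message are hypothetical; new code is expected to use mutexes and
 * condition variables (condvar(9)) rather than this obsolete interface.
 *
 *	while (sc->sc_busy) {
 *		error = tsleep(&sc->sc_busy, PWAIT | PCATCH, "btsy", 0);
 *		if (error != 0)
 *			return error;		(EINTR/ERESTART on signal)
 *	}
 *	sc->sc_busy = true;
 *	... exclusive work ...
 *	sc->sc_busy = false;
 *	wakeup(&sc->sc_busy);
 */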
182
183 int
184 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
185 kmutex_t *mtx)
186 {
187 struct lwp *l = curlwp;
188 sleepq_t *sq;
189 kmutex_t *mp;
190 int error;
191
192 KASSERT((l->l_pflag & LP_INTR) == 0);
193 KASSERT(ident != &lbolt);
194
195 if (sleepq_dontsleep(l)) {
196 (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
197 return 0;
198 }
199
200 l->l_kpriority = true;
201 sq = sleeptab_lookup(&sleeptab, ident, &mp);
202 sleepq_enter(sq, l, mp);
203 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
204 mutex_exit(mtx);
205 error = sleepq_block(timo, priority & PCATCH);
206
207 if ((priority & PNORELOCK) == 0)
208 mutex_enter(mtx);
209
210 return error;
211 }
212
213 /*
214 * General sleep call for situations where a wake-up is not expected.
215 */
216 int
217 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
218 {
219 struct lwp *l = curlwp;
220 kmutex_t *mp;
221 sleepq_t *sq;
222 int error;
223
224 KASSERT(!(timo == 0 && intr == false));
225
226 if (sleepq_dontsleep(l))
227 return sleepq_abort(NULL, 0);
228
229 if (mtx != NULL)
230 mutex_exit(mtx);
231 l->l_kpriority = true;
232 sq = sleeptab_lookup(&sleeptab, l, &mp);
233 sleepq_enter(sq, l, mp);
234 sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
235 error = sleepq_block(timo, intr);
236 if (mtx != NULL)
237 mutex_enter(mtx);
238
239 return error;
240 }
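/*
 * Illustrative sketch only: kpause() suits "sleep for a while, nobody will
 * wake us" situations such as polling loops.  Here device_ready() and
 * sc_lock are hypothetical; the call drops and re-takes sc_lock around a
 * sleep of roughly hz/10 ticks, returns EWOULDBLOCK when the timeout
 * simply expires, and may return early with an error on a signal.
 *
 *	while (!device_ready(sc)) {
 *		error = kpause("dvrdy", true, hz / 10, &sc->sc_lock);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			return error;
 *	}
 */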
241
242 /*
243 * OBSOLETE INTERFACE
244 *
245 * Make all LWPs sleeping on the specified identifier runnable.
246 */
247 void
248 wakeup(wchan_t ident)
249 {
250 sleepq_t *sq;
251 kmutex_t *mp;
252
253 if (__predict_false(cold))
254 return;
255
256 sq = sleeptab_lookup(&sleeptab, ident, &mp);
257 sleepq_wake(sq, ident, (u_int)-1, mp);
258 }
259
260 /*
261 * General yield call. Puts the current LWP back on its run queue and
262 * performs a voluntary context switch. Should only be called when the
263 * current LWP explicitly requests it (eg sched_yield(2)).
264 */
265 void
266 yield(void)
267 {
268 struct lwp *l = curlwp;
269
270 KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
271 lwp_lock(l);
272
273 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
274 KASSERT(l->l_stat == LSONPROC);
275
276 /* Voluntary - ditch kpriority boost. */
277 l->l_kpriority = false;
278 spc_lock(l->l_cpu);
279 mi_switch(l);
280 KERNEL_LOCK(l->l_biglocks, l);
281 }
282
283 /*
284 * General preemption call. Puts the current LWP back on its run queue
285 * and performs an involuntary context switch.
286 */
287 void
288 preempt(void)
289 {
290 struct lwp *l = curlwp;
291
292 KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
293 lwp_lock(l);
294
295 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
296 KASSERT(l->l_stat == LSONPROC);
297
298 /* Involuntary - keep kpriority boost. */
299 l->l_pflag |= LP_PREEMPTING;
300 spc_lock(l->l_cpu);
301 mi_switch(l);
302 KERNEL_LOCK(l->l_biglocks, l);
303 }
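/*
 * Illustrative sketch only: a CPU-bound kernel thread giving up the
 * processor from time to time.  yield() is the voluntary form (and drops
 * the kernel priority boost); preempt() is for the involuntary case, when
 * some other agent has asked the current LWP to get off the CPU.
 * do_unit_of_work() and niter are hypothetical.
 *
 *	for (i = 0; i < niter; i++) {
 *		do_unit_of_work(i);
 *		if ((i & 0xff) == 0)
 *			yield();
 *	}
 */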
304
305 /*
306 * Handle a request made by another agent to preempt the current LWP
307 * in-kernel. Usually called when l_dopreempt may be non-zero.
308 *
309 * Character addresses for lockstat only.
310 */
311 static char kpreempt_is_disabled;
312 static char kernel_lock_held;
313 static char is_softint_lwp;
314 static char spl_is_raised;
315
316 bool
317 kpreempt(uintptr_t where)
318 {
319 uintptr_t failed;
320 lwp_t *l;
321 int s, dop, lsflag;
322
323 l = curlwp;
324 failed = 0;
325 while ((dop = l->l_dopreempt) != 0) {
326 if (l->l_stat != LSONPROC) {
327 /*
328 * About to block (or die), let it happen.
329 * Doesn't really count as "preemption has
330 * been blocked", since we're going to
331 * context switch.
332 */
333 atomic_swap_uint(&l->l_dopreempt, 0);
334 return true;
335 }
336 if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
337 /* Can't preempt idle loop, don't count as failure. */
338 atomic_swap_uint(&l->l_dopreempt, 0);
339 return true;
340 }
341 if (__predict_false(l->l_nopreempt != 0)) {
342 /* LWP holds preemption disabled, explicitly. */
343 if ((dop & DOPREEMPT_COUNTED) == 0) {
344 kpreempt_ev_crit.ev_count++;
345 }
346 failed = (uintptr_t)&kpreempt_is_disabled;
347 break;
348 }
349 if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
350 /* Can't preempt soft interrupts yet. */
351 atomic_swap_uint(&l->l_dopreempt, 0);
352 failed = (uintptr_t)&is_softint_lwp;
353 break;
354 }
355 s = splsched();
356 if (__predict_false(l->l_blcnt != 0 ||
357 curcpu()->ci_biglock_wanted != NULL)) {
358 /* Hold or want kernel_lock, code is not MT safe. */
359 splx(s);
360 if ((dop & DOPREEMPT_COUNTED) == 0) {
361 kpreempt_ev_klock.ev_count++;
362 }
363 failed = (uintptr_t)&kernel_lock_held;
364 break;
365 }
366 if (__predict_false(!cpu_kpreempt_enter(where, s))) {
367 /*
368 * It may be that the IPL is too high.
  369 			 * cpu_kpreempt_enter() can schedule an
370 * interrupt to retry later.
371 */
372 splx(s);
373 failed = (uintptr_t)&spl_is_raised;
374 break;
375 }
376 /* Do it! */
377 if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
378 kpreempt_ev_immed.ev_count++;
379 }
380 lwp_lock(l);
381 /* Involuntary - keep kpriority boost. */
382 l->l_pflag |= LP_PREEMPTING;
383 spc_lock(l->l_cpu);
384 mi_switch(l);
385 l->l_nopreempt++;
386 splx(s);
387
388 /* Take care of any MD cleanup. */
389 cpu_kpreempt_exit(where);
390 l->l_nopreempt--;
391 }
392
393 if (__predict_true(!failed)) {
394 return false;
395 }
396
397 /* Record preemption failure for reporting via lockstat. */
398 atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
399 lsflag = 0;
400 LOCKSTAT_ENTER(lsflag);
401 if (__predict_false(lsflag)) {
402 if (where == 0) {
403 where = (uintptr_t)__builtin_return_address(0);
404 }
405 /* Preemption is on, might recurse, so make it atomic. */
406 if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
407 (void *)where) == NULL) {
408 LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
409 l->l_pfaillock = failed;
410 }
411 }
412 LOCKSTAT_EXIT(lsflag);
413 return true;
414 }
415
416 /*
417 * Return true if preemption is explicitly disabled.
418 */
419 bool
420 kpreempt_disabled(void)
421 {
422 const lwp_t *l = curlwp;
423
424 return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
425 (l->l_flag & LW_IDLE) != 0 || (l->l_pflag & LP_INTR) != 0 ||
426 cpu_kpreempt_disabled();
427 }
428
429 /*
430 * Disable kernel preemption.
431 */
432 void
433 kpreempt_disable(void)
434 {
435
436 KPREEMPT_DISABLE(curlwp);
437 }
438
439 /*
440 * Reenable kernel preemption.
441 */
442 void
443 kpreempt_enable(void)
444 {
445
446 KPREEMPT_ENABLE(curlwp);
447 }
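/*
 * Illustrative sketch only: bracketing access to per-CPU state.  While
 * kernel preemption is disabled the LWP cannot migrate, so curcpu() and
 * any data reached through it stay stable.  mystat[] is hypothetical.
 *
 *	kpreempt_disable();
 *	mystat[cpu_index(curcpu())]++;
 *	kpreempt_enable();
 */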
448
449 /*
450 * Compute the amount of time during which the current lwp was running.
451 *
452 * - update l_rtime unless it's an idle lwp.
453 */
454
455 void
456 updatertime(lwp_t *l, const struct bintime *now)
457 {
458
459 if (__predict_false(l->l_flag & LW_IDLE))
460 return;
461
462 /* rtime += now - stime */
463 bintime_add(&l->l_rtime, now);
464 bintime_sub(&l->l_rtime, &l->l_stime);
465 }
466
467 /*
  468  * Select the next LWP to run on the current CPU.
469 */
470 static inline lwp_t *
471 nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
472 {
473 lwp_t *newl;
474
475 /*
476 * Let sched_nextlwp() select the LWP to run the CPU next.
477 * If no LWP is runnable, select the idle LWP.
478 *
  479 	 * Note that spc_lwplock might not necessarily be held, and that the
  480 	 * new thread would be unlocked after setting the LWP lock.
481 */
482 newl = sched_nextlwp();
483 if (newl != NULL) {
484 sched_dequeue(newl);
485 KASSERT(lwp_locked(newl, spc->spc_mutex));
486 KASSERT(newl->l_cpu == ci);
487 lwp_setlock(newl, spc->spc_lwplock);
488 spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
489 } else {
490 newl = ci->ci_data.cpu_idlelwp;
491 spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
492 SPCF_IDLE;
493 }
494 newl->l_stat = LSONPROC;
495 newl->l_pflag |= LP_RUNNING;
496
497 /*
498 * Only clear want_resched if there are no pending (slow) software
499 * interrupts. We can do this without an atomic, because no new
500 * LWPs can appear in the queue due to our hold on spc_mutex, and
501 * the update to ci_want_resched will become globally visible before
502 * the release of spc_mutex becomes globally visible.
503 */
504 ci->ci_want_resched = ci->ci_data.cpu_softints;
505 spc->spc_curpriority = lwp_eprio(newl);
506
507 return newl;
508 }
509
510 /*
511 * The machine independent parts of context switch.
512 *
513 * NOTE: l->l_cpu is not changed in this routine, because an LWP never
514 * changes its own l_cpu (that would screw up curcpu on many ports and could
515 * cause all kinds of other evil stuff). l_cpu is always changed by some
516 * other actor, when it's known the LWP is not running (the LP_RUNNING flag
517 * is checked under lock).
518 */
519 void
520 mi_switch(lwp_t *l)
521 {
522 struct cpu_info *ci;
523 struct schedstate_percpu *spc;
524 struct lwp *newl;
525 kmutex_t *lock;
526 int oldspl;
527 struct bintime bt;
528 bool returning;
529
530 KASSERT(lwp_locked(l, NULL));
531 KASSERT(kpreempt_disabled());
532 KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex));
533 KASSERTMSG(l->l_blcnt == 0, "kernel_lock leaked");
534
535 kstack_check_magic(l);
536
537 binuptime(&bt);
538
539 KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
540 KASSERT((l->l_pflag & LP_RUNNING) != 0);
541 KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN);
542 ci = curcpu();
543 spc = &ci->ci_schedstate;
544 returning = false;
545 newl = NULL;
546
547 /*
548 * If we have been asked to switch to a specific LWP, then there
549 * is no need to inspect the run queues. If a soft interrupt is
550 * blocking, then return to the interrupted thread without adjusting
551 * VM context or its start time: neither have been changed in order
552 * to take the interrupt.
553 */
554 if (l->l_switchto != NULL) {
555 if ((l->l_pflag & LP_INTR) != 0) {
556 returning = true;
557 softint_block(l);
558 if ((l->l_pflag & LP_TIMEINTR) != 0)
559 updatertime(l, &bt);
560 }
561 newl = l->l_switchto;
562 l->l_switchto = NULL;
563 }
564 #ifndef __HAVE_FAST_SOFTINTS
565 else if (ci->ci_data.cpu_softints != 0) {
566 /* There are pending soft interrupts, so pick one. */
567 newl = softint_picklwp();
568 newl->l_stat = LSONPROC;
569 newl->l_pflag |= LP_RUNNING;
570 }
571 #endif /* !__HAVE_FAST_SOFTINTS */
572
573 /*
574 * If on the CPU and we have gotten this far, then we must yield.
575 */
576 if (l->l_stat == LSONPROC && l != newl) {
577 KASSERT(lwp_locked(l, spc->spc_lwplock));
578 KASSERT((l->l_flag & LW_IDLE) == 0);
579 l->l_stat = LSRUN;
580 lwp_setlock(l, spc->spc_mutex);
581 sched_enqueue(l);
582 sched_preempted(l);
583
584 /*
  585 		 * Handle migration.  Note that the "migrating LWP" may
  586 		 * be reset here, if an interrupt or preemption happens
  587 		 * early in the idle LWP.
588 */
589 if (l->l_target_cpu != NULL && (l->l_pflag & LP_BOUND) == 0) {
590 KASSERT((l->l_pflag & LP_INTR) == 0);
591 spc->spc_migrating = l;
592 }
593 }
594
595 /* Pick new LWP to run. */
596 if (newl == NULL) {
597 newl = nextlwp(ci, spc);
598 }
599
600 /* Items that must be updated with the CPU locked. */
601 if (!returning) {
602 /* Count time spent in current system call */
603 SYSCALL_TIME_SLEEP(l);
604
605 updatertime(l, &bt);
606
607 /* Update the new LWP's start time. */
608 newl->l_stime = bt;
609
610 /*
611 * ci_curlwp changes when a fast soft interrupt occurs.
612 * We use ci_onproc to keep track of which kernel or
613 * user thread is running 'underneath' the software
614 * interrupt. This is important for time accounting,
615 * itimers and forcing user threads to preempt (aston).
616 */
617 ci->ci_onproc = newl;
618 }
619
620 /*
621 * Preemption related tasks. Must be done holding spc_mutex. Clear
622 * l_dopreempt without an atomic - it's only ever set non-zero by
623 * sched_resched_cpu() which also holds spc_mutex, and only ever
624 * cleared by the LWP itself (us) with atomics when not under lock.
625 */
626 l->l_dopreempt = 0;
627 if (__predict_false(l->l_pfailaddr != 0)) {
628 LOCKSTAT_FLAG(lsflag);
629 LOCKSTAT_ENTER(lsflag);
630 LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
631 LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
632 1, l->l_pfailtime, l->l_pfailaddr);
633 LOCKSTAT_EXIT(lsflag);
634 l->l_pfailtime = 0;
635 l->l_pfaillock = 0;
636 l->l_pfailaddr = 0;
637 }
638
639 if (l != newl) {
640 struct lwp *prevlwp;
641
642 /* Release all locks, but leave the current LWP locked */
643 if (l->l_mutex == spc->spc_mutex) {
644 /*
645 * Drop spc_lwplock, if the current LWP has been moved
646 * to the run queue (it is now locked by spc_mutex).
647 */
648 mutex_spin_exit(spc->spc_lwplock);
649 } else {
650 /*
651 * Otherwise, drop the spc_mutex, we are done with the
652 * run queues.
653 */
654 mutex_spin_exit(spc->spc_mutex);
655 }
656
657 /* We're down to only one lock, so do debug checks. */
658 LOCKDEBUG_BARRIER(l->l_mutex, 1);
659
660 /* Count the context switch. */
661 CPU_COUNT(CPU_COUNT_NSWTCH, 1);
662 l->l_ncsw++;
663 if ((l->l_pflag & LP_PREEMPTING) != 0) {
664 l->l_nivcsw++;
665 l->l_pflag &= ~LP_PREEMPTING;
666 }
667
668 /*
669 * Increase the count of spin-mutexes before the release
670 * of the last lock - we must remain at IPL_SCHED after
671 * releasing the lock.
672 */
673 KASSERTMSG(ci->ci_mtx_count == -1,
674 "%s: cpu%u: ci_mtx_count (%d) != -1 "
675 "(block with spin-mutex held)",
676 __func__, cpu_index(ci), ci->ci_mtx_count);
677 oldspl = MUTEX_SPIN_OLDSPL(ci);
678 ci->ci_mtx_count = -2;
679
680 /* Update status for lwpctl, if present. */
681 if (l->l_lwpctl != NULL) {
682 l->l_lwpctl->lc_curcpu = (l->l_stat == LSZOMB ?
683 LWPCTL_CPU_EXITED : LWPCTL_CPU_NONE);
684 }
685
686 /*
687 * If curlwp is a soft interrupt LWP, there's nobody on the
688 * other side to unlock - we're returning into an assembly
689 * trampoline. Unlock now. This is safe because this is a
  690 		 * kernel LWP and is bound to the current CPU: the worst anyone
  691 		 * else will do to it is to put it back onto this CPU's run
692 * queue (and the CPU is busy here right now!).
693 */
694 if (returning) {
695 /* Keep IPL_SCHED after this; MD code will fix up. */
696 l->l_pflag &= ~LP_RUNNING;
697 lwp_unlock(l);
698 } else {
699 /* A normal LWP: save old VM context. */
700 pmap_deactivate(l);
701 }
702
703 /*
704 * If DTrace has set the active vtime enum to anything
705 * other than INACTIVE (0), then it should have set the
706 * function to call.
707 */
708 if (__predict_false(dtrace_vtime_active)) {
709 (*dtrace_vtime_switch_func)(newl);
710 }
711
712 /*
713 * We must ensure not to come here from inside a read section.
714 */
715 KASSERT(pserialize_not_in_read_section());
716
717 /* Switch to the new LWP.. */
718 #ifdef MULTIPROCESSOR
719 KASSERT(curlwp == ci->ci_curlwp);
720 #endif
721 KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
722 prevlwp = cpu_switchto(l, newl, returning);
723 ci = curcpu();
724 #ifdef MULTIPROCESSOR
725 KASSERT(curlwp == ci->ci_curlwp);
726 #endif
727 KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p",
728 l, curlwp, prevlwp);
729 KASSERT(prevlwp != NULL);
730 KASSERT(l->l_cpu == ci);
731 KASSERT(ci->ci_mtx_count == -2);
732
733 /*
734 * Immediately mark the previous LWP as no longer running
  735 		 * and unlock (to keep lock wait times as short as possible).
736 * We'll still be at IPL_SCHED afterwards. If a zombie,
737 * don't touch after clearing LP_RUNNING as it could be
738 * reaped by another CPU. Issue a memory barrier to ensure
739 * this.
740 */
741 KASSERT((prevlwp->l_pflag & LP_RUNNING) != 0);
742 lock = prevlwp->l_mutex;
743 if (__predict_false(prevlwp->l_stat == LSZOMB)) {
744 membar_sync();
745 }
746 prevlwp->l_pflag &= ~LP_RUNNING;
747 mutex_spin_exit(lock);
748
749 /*
750 * Switched away - we have new curlwp.
751 * Restore VM context and IPL.
752 */
753 pmap_activate(l);
754 pcu_switchpoint(l);
755
756 /* Update status for lwpctl, if present. */
757 if (l->l_lwpctl != NULL) {
758 l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
759 l->l_lwpctl->lc_pctr++;
760 }
761
762 /*
763 * Normalize the spin mutex count and restore the previous
764 * SPL. Note that, unless the caller disabled preemption,
765 * we can be preempted at any time after this splx().
766 */
767 KASSERT(l->l_cpu == ci);
768 KASSERT(ci->ci_mtx_count == -1);
769 ci->ci_mtx_count = 0;
770 splx(oldspl);
771 } else {
772 /* Nothing to do - just unlock and return. */
773 mutex_spin_exit(spc->spc_mutex);
774 l->l_pflag &= ~LP_PREEMPTING;
775 lwp_unlock(l);
776 }
777
778 KASSERT(l == curlwp);
779 KASSERT(l->l_stat == LSONPROC);
780
781 SYSCALL_TIME_WAKEUP(l);
782 LOCKDEBUG_BARRIER(NULL, 1);
783 }
784
785 /*
786 * setrunnable: change LWP state to be runnable, placing it on the run queue.
787 *
788 * Call with the process and LWP locked. Will return with the LWP unlocked.
789 */
790 void
791 setrunnable(struct lwp *l)
792 {
793 struct proc *p = l->l_proc;
794 struct cpu_info *ci;
795 kmutex_t *oldlock;
796
797 KASSERT((l->l_flag & LW_IDLE) == 0);
798 KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
799 KASSERT(mutex_owned(p->p_lock));
800 KASSERT(lwp_locked(l, NULL));
801 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
802
803 switch (l->l_stat) {
804 case LSSTOP:
805 /*
806 * If we're being traced (possibly because someone attached us
807 * while we were stopped), check for a signal from the debugger.
808 */
809 if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xsig != 0)
810 signotify(l);
811 p->p_nrlwps++;
812 break;
813 case LSSUSPENDED:
814 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
815 l->l_flag &= ~LW_WSUSPEND;
816 p->p_nrlwps++;
817 cv_broadcast(&p->p_lwpcv);
818 break;
819 case LSSLEEP:
820 KASSERT(l->l_wchan != NULL);
821 break;
822 case LSIDL:
823 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
824 break;
825 default:
826 panic("setrunnable: lwp %p state was %d", l, l->l_stat);
827 }
828
829 /*
830 * If the LWP was sleeping, start it again.
831 */
832 if (l->l_wchan != NULL) {
833 l->l_stat = LSSLEEP;
834 /* lwp_unsleep() will release the lock. */
835 lwp_unsleep(l, true);
836 return;
837 }
838
839 /*
840 * If the LWP is still on the CPU, mark it as LSONPROC. It may be
841 * about to call mi_switch(), in which case it will yield.
842 */
843 if ((l->l_pflag & LP_RUNNING) != 0) {
844 l->l_stat = LSONPROC;
845 l->l_slptime = 0;
846 lwp_unlock(l);
847 return;
848 }
849
850 /*
851 * Look for a CPU to run.
852 * Set the LWP runnable.
853 */
854 ci = sched_takecpu(l);
855 l->l_cpu = ci;
856 spc_lock(ci);
857 oldlock = lwp_setlock(l, l->l_cpu->ci_schedstate.spc_mutex);
858 sched_setrunnable(l);
859 l->l_stat = LSRUN;
860 l->l_slptime = 0;
861 sched_enqueue(l);
862 sched_resched_lwp(l, true);
863 /* SPC & LWP now unlocked. */
864 mutex_spin_exit(oldlock);
865 }
866
867 /*
868 * suspendsched:
869 *
870 * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
871 */
872 void
873 suspendsched(void)
874 {
875 CPU_INFO_ITERATOR cii;
876 struct cpu_info *ci;
877 struct lwp *l;
878 struct proc *p;
879
880 /*
881 * We do this by process in order not to violate the locking rules.
882 */
883 mutex_enter(proc_lock);
884 PROCLIST_FOREACH(p, &allproc) {
885 mutex_enter(p->p_lock);
886 if ((p->p_flag & PK_SYSTEM) != 0) {
887 mutex_exit(p->p_lock);
888 continue;
889 }
890
891 if (p->p_stat != SSTOP) {
892 if (p->p_stat != SZOMB && p->p_stat != SDEAD) {
893 p->p_pptr->p_nstopchild++;
894 p->p_waited = 0;
895 }
896 p->p_stat = SSTOP;
897 }
898
899 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
900 if (l == curlwp)
901 continue;
902
903 lwp_lock(l);
904
905 /*
  906 			 * Set LW_WREBOOT so that the LWP will suspend itself
  907 			 * when it tries to return to user mode.  We want to
  908 			 * get as many LWPs as possible to the user/kernel
  909 			 * boundary, so that they will release any locks
  910 			 * that they hold.
911 */
912 l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
913
914 if (l->l_stat == LSSLEEP &&
915 (l->l_flag & LW_SINTR) != 0) {
916 /* setrunnable() will release the lock. */
917 setrunnable(l);
918 continue;
919 }
920
921 lwp_unlock(l);
922 }
923
924 mutex_exit(p->p_lock);
925 }
926 mutex_exit(proc_lock);
927
928 /*
929 * Kick all CPUs to make them preempt any LWPs running in user mode.
930 * They'll trap into the kernel and suspend themselves in userret().
931 *
932 * Unusually, we don't hold any other scheduler object locked, which
933 * would keep preemption off for sched_resched_cpu(), so disable it
934 * explicitly.
935 */
936 kpreempt_disable();
937 for (CPU_INFO_FOREACH(cii, ci)) {
938 spc_lock(ci);
939 sched_resched_cpu(ci, PRI_KERNEL, true);
940 /* spc now unlocked */
941 }
942 kpreempt_enable();
943 }
944
945 /*
946 * sched_unsleep:
947 *
  948  * This is called when the LWP has not been awoken normally but instead
949 * interrupted: for example, if the sleep timed out. Because of this,
950 * it's not a valid action for running or idle LWPs.
951 */
952 static void
953 sched_unsleep(struct lwp *l, bool cleanup)
954 {
955
956 lwp_unlock(l);
957 panic("sched_unsleep");
958 }
959
960 static void
961 sched_changepri(struct lwp *l, pri_t pri)
962 {
963 struct schedstate_percpu *spc;
964 struct cpu_info *ci;
965
966 KASSERT(lwp_locked(l, NULL));
967
968 ci = l->l_cpu;
969 spc = &ci->ci_schedstate;
970
971 if (l->l_stat == LSRUN) {
972 KASSERT(lwp_locked(l, spc->spc_mutex));
973 sched_dequeue(l);
974 l->l_priority = pri;
975 sched_enqueue(l);
976 sched_resched_lwp(l, false);
977 } else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) {
978 /* On priority drop, only evict realtime LWPs. */
979 KASSERT(lwp_locked(l, spc->spc_lwplock));
980 l->l_priority = pri;
981 spc_lock(ci);
982 sched_resched_cpu(ci, spc->spc_maxpriority, true);
983 /* spc now unlocked */
984 } else {
985 l->l_priority = pri;
986 }
987 }
988
989 static void
990 sched_lendpri(struct lwp *l, pri_t pri)
991 {
992 struct schedstate_percpu *spc;
993 struct cpu_info *ci;
994
995 KASSERT(lwp_locked(l, NULL));
996
997 ci = l->l_cpu;
998 spc = &ci->ci_schedstate;
999
1000 if (l->l_stat == LSRUN) {
1001 KASSERT(lwp_locked(l, spc->spc_mutex));
1002 sched_dequeue(l);
1003 l->l_inheritedprio = pri;
1004 l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
1005 sched_enqueue(l);
1006 sched_resched_lwp(l, false);
1007 } else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) {
1008 /* On priority drop, only evict realtime LWPs. */
1009 KASSERT(lwp_locked(l, spc->spc_lwplock));
1010 l->l_inheritedprio = pri;
1011 l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
1012 spc_lock(ci);
1013 sched_resched_cpu(ci, spc->spc_maxpriority, true);
1014 /* spc now unlocked */
1015 } else {
1016 l->l_inheritedprio = pri;
1017 l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
1018 }
1019 }
1020
1021 struct lwp *
1022 syncobj_noowner(wchan_t wchan)
1023 {
1024
1025 return NULL;
1026 }
1027
1028 /* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
1029 const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
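/*
 * With sched_pstats() invoked once per second (it samples the load average
 * every fifth call, i.e. at the 5 second intervals noted below), p_pctcpu
 * is multiplied by ccpu once a second; after 60 seconds the surviving
 * fraction is ccpu^60 = exp(-60/20) = exp(-3) =~ 0.05, i.e. about 95% of
 * the old estimate has decayed away, as the comment above says.
 */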
1030
1031 /*
1032 * Constants for averages over 1, 5 and 15 minutes when sampling at
1033 * 5 second intervals.
1034 */
1035 static const fixpt_t cexp[ ] = {
1036 0.9200444146293232 * FSCALE, /* exp(-1/12) */
1037 0.9834714538216174 * FSCALE, /* exp(-1/60) */
1038 0.9944598480048967 * FSCALE, /* exp(-1/180) */
1039 };
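/*
 * Each entry is exp(-T/tau) for T = 5 seconds between samples and
 * tau = 60, 300 and 900 seconds.  The update in sched_pstats(),
 *
 *	ldavg = (cexp * ldavg + nrun * FSCALE * (FSCALE - cexp)) >> FSHIFT
 *
 * is an exponential moving average in FSCALE fixed point: with nrun held
 * constant the load average converges to nrun, and a step change decays
 * towards the new value with the corresponding 1, 5 or 15 minute time
 * constant.
 */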
1040
1041 /*
1042 * sched_pstats:
1043 *
1044 * => Update process statistics and check CPU resource allocation.
1045 * => Call scheduler-specific hook to eventually adjust LWP priorities.
1046 * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
1047 */
1048 void
1049 sched_pstats(void)
1050 {
1051 extern struct loadavg averunnable;
1052 struct loadavg *avg = &averunnable;
1053 const int clkhz = (stathz != 0 ? stathz : hz);
1054 static bool backwards = false;
1055 static u_int lavg_count = 0;
1056 struct proc *p;
1057 int nrun;
1058
1059 sched_pstats_ticks++;
1060 if (++lavg_count >= 5) {
1061 lavg_count = 0;
1062 nrun = 0;
1063 }
1064 mutex_enter(proc_lock);
1065 PROCLIST_FOREACH(p, &allproc) {
1066 struct lwp *l;
1067 struct rlimit *rlim;
1068 time_t runtm;
1069 int sig;
1070
1071 /* Increment sleep time (if sleeping), ignore overflow. */
1072 mutex_enter(p->p_lock);
1073 runtm = p->p_rtime.sec;
1074 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1075 fixpt_t lpctcpu;
1076 u_int lcpticks;
1077
1078 if (__predict_false((l->l_flag & LW_IDLE) != 0))
1079 continue;
1080 lwp_lock(l);
1081 runtm += l->l_rtime.sec;
1082 l->l_swtime++;
1083 sched_lwp_stats(l);
1084
1085 /* For load average calculation. */
1086 if (__predict_false(lavg_count == 0) &&
1087 (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
1088 switch (l->l_stat) {
1089 case LSSLEEP:
1090 if (l->l_slptime > 1) {
1091 break;
1092 }
1093 /* FALLTHROUGH */
1094 case LSRUN:
1095 case LSONPROC:
1096 case LSIDL:
1097 nrun++;
1098 }
1099 }
1100 lwp_unlock(l);
1101
1102 l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
1103 if (l->l_slptime != 0)
1104 continue;
1105
1106 lpctcpu = l->l_pctcpu;
1107 lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
1108 lpctcpu += ((FSCALE - ccpu) *
1109 (lcpticks * FSCALE / clkhz)) >> FSHIFT;
1110 l->l_pctcpu = lpctcpu;
1111 }
 1112 		/* Calculate p_pctcpu only for ps(1). */
1113 p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
1114
1115 if (__predict_false(runtm < 0)) {
1116 if (!backwards) {
1117 backwards = true;
1118 printf("WARNING: negative runtime; "
1119 "monotonic clock has gone backwards\n");
1120 }
1121 mutex_exit(p->p_lock);
1122 continue;
1123 }
1124
1125 /*
1126 * Check if the process exceeds its CPU resource allocation.
1127 * If over the hard limit, kill it with SIGKILL.
1128 * If over the soft limit, send SIGXCPU and raise
1129 * the soft limit a little.
1130 */
1131 rlim = &p->p_rlimit[RLIMIT_CPU];
1132 sig = 0;
1133 if (__predict_false(runtm >= rlim->rlim_cur)) {
1134 if (runtm >= rlim->rlim_max) {
1135 sig = SIGKILL;
1136 log(LOG_NOTICE,
1137 "pid %d, command %s, is killed: %s\n",
1138 p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
1139 uprintf("pid %d, command %s, is killed: %s\n",
1140 p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
1141 } else {
1142 sig = SIGXCPU;
1143 if (rlim->rlim_cur < rlim->rlim_max)
1144 rlim->rlim_cur += 5;
1145 }
1146 }
1147 mutex_exit(p->p_lock);
1148 if (__predict_false(sig)) {
1149 KASSERT((p->p_flag & PK_SYSTEM) == 0);
1150 psignal(p, sig);
1151 }
1152 }
1153
1154 /* Load average calculation. */
1155 if (__predict_false(lavg_count == 0)) {
1156 int i;
1157 CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
1158 for (i = 0; i < __arraycount(cexp); i++) {
1159 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
1160 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
1161 }
1162 }
1163
1164 /* Lightning bolt. */
1165 cv_broadcast(&lbolt);
1166
1167 mutex_exit(proc_lock);
1168 }
1169