/*	$NetBSD: kern_synch.c,v 1.324 2019/10/03 22:48:44 kamil Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.324 2019/10/03 22:48:44 kamil Exp $");

#include "opt_kstack.h"
#include "opt_dtrace.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/pserialize.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <dev/lockstat.h>

#include <sys/dtrace_bsd.h>
int                             dtrace_vtime_active=0;
dtrace_vtime_switch_func_t      dtrace_vtime_switch_func;

static void	sched_unsleep(struct lwp *, bool);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);
static void	resched_cpu(struct lwp *);

syncobj_t sleep_syncobj = {
	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
	.sobj_unsleep	= sleepq_unsleep,
	.sobj_changepri	= sleepq_changepri,
	.sobj_lendpri	= sleepq_lendpri,
	.sobj_owner	= syncobj_noowner,
};

syncobj_t sched_syncobj = {
	.sobj_flag	= SOBJ_SLEEPQ_SORTED,
	.sobj_unsleep	= sched_unsleep,
	.sobj_changepri	= sched_changepri,
	.sobj_lendpri	= sched_lendpri,
	.sobj_owner	= syncobj_noowner,
};

/* "Lightning bolt": once a second sleep address. */
kcondvar_t		lbolt			__cacheline_aligned;

u_int			sched_pstats_ticks	__cacheline_aligned;

/* Preemption event counters. */
static struct evcnt	kpreempt_ev_crit	__cacheline_aligned;
static struct evcnt	kpreempt_ev_klock	__cacheline_aligned;
static struct evcnt	kpreempt_ev_immed	__cacheline_aligned;

void
synch_init(void)
{

	cv_init(&lbolt, "lbolt");

	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
	   "kpreempt", "defer: critical section");
	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
	   "kpreempt", "defer: kernel_lock");
	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
	   "kpreempt", "immediate");
}

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current LWP until a wakeup is
 * performed on the specified identifier.  The LWP will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 */
int
tsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	return sleepq_block(timo, priority & PCATCH);
}

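/*
 * Illustrative sketch (not part of the original source): a typical
 * caller of the obsolete tsleep() interface.  The softc layout and the
 * SC_READY flag are hypothetical; any kernel virtual address may serve
 * as the sleep identifier.
 *
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = tsleep(&sc->sc_flags, PWAIT | PCATCH, "scrdy", hz);
 *		if (error == EWOULDBLOCK)
 *			return ETIMEDOUT;	(one second elapsed)
 *		if (error != 0)
 *			return error;		(EINTR/ERESTART: signal)
 *	}
 */
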
int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}

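/*
 * Illustrative sketch (not part of the original source): mtsleep()
 * drops the given mutex across the sleep and, unless PNORELOCK is set,
 * reacquires it before returning.  The softc and its members are
 * hypothetical; sc_lock is assumed to be held on entry.
 *
 *	while (sc->sc_busy) {
 *		error = mtsleep(&sc->sc_busy, PWAIT | PCATCH, "scbusy",
 *		    0, &sc->sc_lock);
 *		if (error != 0)
 *			return error;	(sc_lock is held again here)
 *	}
 */
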
/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	kmutex_t *mp;
	sleepq_t *sq;
	int error;

	KASSERT(!(timo == 0 && intr == false));

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}

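/*
 * Illustrative sketch (not part of the original source): kpause() is
 * the usual way to simply wait out a short interval, e.g. when polling
 * hardware that has no completion interrupt.  This sleeps roughly 10ms,
 * not interruptible by signals, with no mutex to drop:
 *
 *	(void)kpause("hwpoll", false, mstohz(10), NULL);
 */
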
/*
 * OBSOLETE INTERFACE
 *
 * Make all LWPs sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, (u_int)-1, mp);
}

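/*
 * Illustrative counterpart (not part of the original source) to the
 * tsleep() sketch above: the producer sets the hypothetical SC_READY
 * flag and then wakes every LWP sleeping on the same identifier.
 *
 *	sc->sc_flags |= SC_READY;
 *	wakeup(&sc->sc_flags);
 */
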
/*
 * General yield call.  Puts the current LWP back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current LWP explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_pflag |= LP_PREEMPTING;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Handle a request made by another agent to preempt the current LWP
 * in-kernel.  Usually called when l_dopreempt may be non-zero.
 *
 * Character addresses for lockstat only.
 */
static char	in_critical_section;
static char	kernel_lock_held;
static char	is_softint;
static char	cpu_kpreempt_enter_fail;

bool
kpreempt(uintptr_t where)
{
	uintptr_t failed;
	lwp_t *l;
	int s, dop, lsflag;

	l = curlwp;
	failed = 0;
	while ((dop = l->l_dopreempt) != 0) {
		if (l->l_stat != LSONPROC) {
			/*
			 * About to block (or die), let it happen.
			 * Doesn't really count as "preemption has
			 * been blocked", since we're going to
			 * context switch.
			 */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
			/* Can't preempt idle loop, don't count as failure. */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false(l->l_nopreempt != 0)) {
			/* LWP holds preemption disabled, explicitly. */
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_crit.ev_count++;
			}
			failed = (uintptr_t)&in_critical_section;
			break;
		}
		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
			/* Can't preempt soft interrupts yet. */
			l->l_dopreempt = 0;
			failed = (uintptr_t)&is_softint;
			break;
		}
		s = splsched();
		if (__predict_false(l->l_blcnt != 0 ||
		    curcpu()->ci_biglock_wanted != NULL)) {
			/* Hold or want kernel_lock, code is not MT safe. */
			splx(s);
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_klock.ev_count++;
			}
			failed = (uintptr_t)&kernel_lock_held;
			break;
		}
		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
			/*
			 * It may be that the IPL is too high.
			 * cpu_kpreempt_enter() can schedule an
			 * interrupt to retry later.
			 */
			splx(s);
			failed = (uintptr_t)&cpu_kpreempt_enter_fail;
			break;
		}
		/* Do it! */
		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
			kpreempt_ev_immed.ev_count++;
		}
		lwp_lock(l);
		mi_switch(l);
		l->l_nopreempt++;
		splx(s);

		/* Take care of any MD cleanup. */
		cpu_kpreempt_exit(where);
		l->l_nopreempt--;
	}

	if (__predict_true(!failed)) {
		return false;
	}

	/* Record preemption failure for reporting via lockstat. */
	atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
	lsflag = 0;
	LOCKSTAT_ENTER(lsflag);
	if (__predict_false(lsflag)) {
		if (where == 0) {
			where = (uintptr_t)__builtin_return_address(0);
		}
		/* Preemption is on, might recurse, so make it atomic. */
		if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
		    (void *)where) == NULL) {
			LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
			l->l_pfaillock = failed;
		}
	}
	LOCKSTAT_EXIT(lsflag);
	return true;
}

/*
 * Return true if preemption is explicitly disabled.
 */
bool
kpreempt_disabled(void)
{
	const lwp_t *l = curlwp;

	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
}

/*
 * Disable kernel preemption.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

/*
 * Reenable kernel preemption.
 */
void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}

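/*
 * Illustrative sketch (not part of the original source): a minimal
 * preemption-disabled critical section.  With preemption off the LWP
 * cannot migrate, so curcpu() stays valid across the section:
 *
 *	kpreempt_disable();
 *	ci = curcpu();		(remains stable: the LWP cannot migrate)
 *	... safely use ci, e.g. per-CPU counters or buffers ...
 *	kpreempt_enable();
 */
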
/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if (__predict_false(l->l_flag & LW_IDLE))
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}

/*
 * Select the next LWP to run on the current CPU.
 */
static inline lwp_t *
nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
{
	lwp_t *newl;

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock is not necessarily held, and that the
	 * new thread will be unlocked after the LWP lock is set.
	 */
	newl = sched_nextlwp();
	if (newl != NULL) {
		sched_dequeue(newl);
		KASSERT(lwp_locked(newl, spc->spc_mutex));
		KASSERT(newl->l_cpu == ci);
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
		lwp_setlock(newl, spc->spc_lwplock);
	} else {
		newl = ci->ci_data.cpu_idlelwp;
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}

	/*
	 * Only clear want_resched if there are no pending (slow)
	 * software interrupts.
	 */
	ci->ci_want_resched = ci->ci_data.cpu_softints;
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
	spc->spc_curpriority = lwp_eprio(newl);

	return newl;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(kpreempt_disabled());
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	kstack_check_magic(l);

	binuptime(&bt);

	KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	KASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither has been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_pflag & LP_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		updatertime(l, &bt);
	}

	/* Lock the runqueue */
	KASSERT(l->l_stat != LSRUN);
	mutex_spin_enter(spc->spc_mutex);

	/*
	 * If we are still on the CPU and have gotten this far, we must yield.
	 */
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
			/*
			 * Handle migration.  Note that "migrating LWP" may
			 * be reset here, if an interrupt or preemption
			 * happens early in the idle LWP.
			 */
			if (l->l_target_cpu != NULL &&
			    (l->l_pflag & LP_BOUND) == 0) {
				KASSERT((l->l_pflag & LP_INTR) == 0);
				spc->spc_migrating = l;
			}
		} else
			l->l_stat = LSIDL;
	}

	/* Pick new LWP to run. */
	if (newl == NULL) {
		newl = nextlwp(ci, spc);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);
	l->l_dopreempt = 0;
	if (__predict_false(l->l_pfailaddr != 0)) {
		LOCKSTAT_FLAG(lsflag);
		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
		    1, l->l_pfailtime, l->l_pfailaddr);
		LOCKSTAT_EXIT(lsflag);
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
		l->l_pfailaddr = 0;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that a context switch is about to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		if ((l->l_pflag & LP_PREEMPTING) != 0)
			l->l_nivcsw++;
		l->l_pflag &= ~LP_PREEMPTING;
		KASSERT((l->l_pflag & LP_RUNNING) != 0);
		l->l_pflag &= ~LP_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		KASSERTMSG(ci->ci_mtx_count == -1,
		    "%s: cpu%u: ci_mtx_count (%d) != -1 "
		    "(block with spin-mutex held)",
		     __func__, cpu_index(ci), ci->ci_mtx_count);
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Count the context switch on this CPU. */
		ci->ci_data.cpu_nswtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (__predict_false(newl->l_ctxswtch != 0)) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (__predict_false(dtrace_vtime_active)) {
			(*dtrace_vtime_switch_func)(newl);
		}

		/*
		 * We must ensure not to come here from inside a read section.
		 */
		KASSERT(pserialize_not_in_read_section());

		/* Switch to the new LWP. */
#ifdef MULTIPROCESSOR
		KASSERT(curlwp == ci->ci_curlwp);
#endif
		KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp);
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();
#ifdef MULTIPROCESSOR
		KASSERT(curlwp == ci->ci_curlwp);
#endif
		KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p",
		    l, curlwp, prevlwp);

		/*
		 * Switched away - we have a new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		pcu_switchpoint(l);

		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL) {
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
			l->l_lwpctl->lc_pctr++;
		}

		/* Note trip through cpu_switchto(). */
		pserialize_switchpoint();

		KASSERT(l->l_cpu == ci);
		splx(oldspl);
		/*
		 * Note that, unless the caller disabled preemption, we can
		 * be preempted at any time after the splx() call above.
		 */
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		pserialize_switchpoint();
		mutex_spin_exit(spc->spc_mutex);
		l->l_pflag &= ~LP_PREEMPTING;
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * The machine independent parts of context switch to oblivion.
 * Does not return.  Call with the LWP unlocked.
 */
void
lwp_exit_switchaway(lwp_t *l)
{
	struct cpu_info *ci;
	struct lwp *newl;
	struct bintime bt;

	ci = l->l_cpu;

	KASSERT(kpreempt_disabled());
	KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
	KASSERT(ci == curcpu());
	LOCKDEBUG_BARRIER(NULL, 0);

	kstack_check_magic(l);

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);
	binuptime(&bt);
	updatertime(l, &bt);

	/* Must stay at IPL_SCHED even after releasing run queue lock. */
	(void)splsched();

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock is not necessarily held, and that the
	 * new thread will be unlocked after the LWP lock is set.
	 */
	spc_lock(ci);
#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	} else
#endif	/* !__HAVE_FAST_SOFTINTS */
	{
		newl = nextlwp(ci, &ci->ci_schedstate);
	}

	/* Update the new LWP's start time. */
	newl->l_stime = bt;
	l->l_pflag &= ~LP_RUNNING;

	/*
	 * ci_curlwp changes when a fast soft interrupt occurs.
	 * We use cpu_onproc to keep track of which kernel or
	 * user thread is running 'underneath' the software
	 * interrupt.  This is important for time accounting,
	 * itimers and forcing user threads to preempt (aston).
	 */
	ci->ci_data.cpu_onproc = newl;

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);

	/* Unlock the run queue. */
	spc_unlock(ci);

	/* Count the context switch on this CPU. */
	ci->ci_data.cpu_nswtch++;

	/* Update status for lwpctl, if present. */
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;

	/*
	 * We may need to spin-wait if 'newl' is still
	 * context switching on another CPU.
	 */
	if (__predict_false(newl->l_ctxswtch != 0)) {
		u_int count;
		count = SPINLOCK_BACKOFF_MIN;
		while (newl->l_ctxswtch)
			SPINLOCK_BACKOFF(count);
	}

	/*
	 * If DTrace has set the active vtime enum to anything
	 * other than INACTIVE (0), then it should have set the
	 * function to call.
	 */
	if (__predict_false(dtrace_vtime_active)) {
		(*dtrace_vtime_switch_func)(newl);
	}

	/* Switch to the new LWP. */
	(void)cpu_switchto(NULL, newl, false);

	for (;;) continue;	/* XXX: convince gcc about "noreturn" */
	/* NOTREACHED */
}

/*
 * setrunnable: change LWP state to be runnable, placing it on the run queue.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT((l->l_flag & LW_DBGSUSPEND) == 0);
	KASSERT(mutex_owned(p->p_lock));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xsig != 0)
			signotify(l);
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping, start it again.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run on.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	spc_lock(ci);
	lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	sched_enqueue(l, false);
	resched_cpu(l);
	lwp_unlock(l);
}

/*
 * suspendsched:
 *
 *	Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(p->p_lock);
		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(p->p_lock);
			continue;
		}

		if (p->p_stat != SSTOP) {
			if (p->p_stat != SZOMB && p->p_stat != SDEAD) {
				p->p_pptr->p_nstopchild++;
				p->p_waited = 0;
			}
			p->p_stat = SSTOP;
		}

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks that
			 * they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(p->p_lock);
	}
	mutex_exit(proc_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l, bool cleanup)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

static void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	KASSERT(lwp_locked(l, NULL));
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
		l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio);
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/*
 * Decay 95% of proc::p_pctcpu in 60 seconds: with one update per second,
 * ccpu^60 = exp(-3) ~= 0.05, hence ccpu = exp(-1/20).
 */
const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;

/*
 * Constants for averages over 1, 5 and 15 minutes when sampling at
 * 5 second intervals.
 */
static const fixpt_t cexp[ ] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
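
/*
 * Illustrative note (not part of the original source): each coefficient
 * is exp(-t/T) for a sampling interval of t = 5 seconds and an averaging
 * window of T = 60, 300 or 900 seconds; e.g. exp(-5/60) = exp(-1/12) for
 * the 1-minute average.  sched_pstats() below applies the standard
 * exponential moving average in fixed point:
 *
 *	load = load * exp(-t/T) + nrun * (1 - exp(-t/T))
 */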

/*
 * sched_pstats:
 *
 * => Update process statistics and check CPU resource allocation.
 * => Call scheduler-specific hook to eventually adjust LWP priorities.
 * => Compute the load average over 1, 5 and 15 minute intervals.
 */
void
sched_pstats(void)
{
	extern struct loadavg averunnable;
	struct loadavg *avg = &averunnable;
	const int clkhz = (stathz != 0 ? stathz : hz);
	static bool backwards = false;
	static u_int lavg_count = 0;
	struct proc *p;
	int nrun;

	sched_pstats_ticks++;
	if (++lavg_count >= 5) {
		lavg_count = 0;
		nrun = 0;
	}
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		struct lwp *l;
		struct rlimit *rlim;
		time_t runtm;
		int sig;

		/* Increment sleep time (if sleeping), ignore overflow. */
		mutex_enter(p->p_lock);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			fixpt_t lpctcpu;
			u_int lcpticks;

			if (__predict_false((l->l_flag & LW_IDLE) != 0))
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_lwp_stats(l);

			/* For load average calculation. */
			if (__predict_false(lavg_count == 0) &&
			    (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
				switch (l->l_stat) {
				case LSSLEEP:
					if (l->l_slptime > 1) {
						break;
					}
					/* FALLTHROUGH */
				case LSRUN:
				case LSONPROC:
				case LSIDL:
					nrun++;
				}
			}
			lwp_unlock(l);

			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime != 0)
				continue;

			lpctcpu = l->l_pctcpu;
			lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
			lpctcpu += ((FSCALE - ccpu) *
			    (lcpticks * FSCALE / clkhz)) >> FSHIFT;
			l->l_pctcpu = lpctcpu;
		}
		/* Calculate p_pctcpu only for ps(1). */
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		if (__predict_false(runtm < 0)) {
			if (!backwards) {
				backwards = true;
				printf("WARNING: negative runtime; "
				    "monotonic clock has gone backwards\n");
			}
			mutex_exit(p->p_lock);
			continue;
		}

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over the hard limit, kill it with SIGKILL.
		 * If over the soft limit, send SIGXCPU and raise
		 * the soft limit a little.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (__predict_false(runtm >= rlim->rlim_cur)) {
			if (runtm >= rlim->rlim_max) {
				sig = SIGKILL;
				log(LOG_NOTICE,
				    "pid %d, command %s, is killed: %s\n",
				    p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
				uprintf("pid %d, command %s, is killed: %s\n",
				    p->p_pid, p->p_comm, "exceeded RLIMIT_CPU");
			} else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(p->p_lock);
		if (__predict_false(sig)) {
			KASSERT((p->p_flag & PK_SYSTEM) == 0);
			psignal(p, sig);
		}
	}
	mutex_exit(proc_lock);

	/* Load average calculation. */
	if (__predict_false(lavg_count == 0)) {
		int i;
		CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
		for (i = 0; i < __arraycount(cexp); i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/* Lightning bolt. */
	cv_broadcast(&lbolt);
}
   1233