/*	$NetBSD: kern_synch.c,v 1.186.2.19 2007/10/18 15:47:33 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.186.2.19 2007/10/18 15:47:33 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>

#include <uvm/uvm_extern.h>

callout_t sched_pstats_ch;
unsigned int sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
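
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): a typical ltsleep() caller tests a condition under a
 * simplelock and sleeps until a matching wakeup().  Without PNORELOCK
 * the interlock is held again on return, so the condition can be
 * rechecked safely.  The "sc" softc and its sc_slock/sc_ready fields
 * are hypothetical.
 *
 *	simple_lock(&sc->sc_slock);
 *	while (!sc->sc_ready) {
 *		error = ltsleep(&sc->sc_ready, PRIBIO | PCATCH, "scrdy",
 *		    0, &sc->sc_slock);
 *		if (error)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 */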

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
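
/*
 * Illustrative sketch (editor's addition): the kmutex_t flavour of the
 * same pattern.  mtsleep() drops "mtx" once the LWP is on the sleep
 * queue and, without PNORELOCK, reacquires it before returning.  The
 * "sc" softc and its fields are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = mtsleep(&sc->sc_ready, PRIBIO | PCATCH, "scrdy",
 *		    0, &sc->sc_lock);
 *		if (error)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */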

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
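
/*
 * Illustrative sketch (editor's addition): kpause() is for plain
 * delays where no wakeup() is expected, which is why the LWP itself
 * is used as the sleep channel.  An uninterruptible delay of roughly
 * 100ms might look like the line below; mstohz() is assumed to be
 * available from <sys/param.h>.
 *
 *	(void)kpause("pause", false, mstohz(100), NULL);
 */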

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}
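
/*
 * Illustrative sketch (editor's addition): the producer side matching
 * the sleep examples above.  Update the condition while holding the
 * same lock the sleeper uses, then wake every LWP sleeping on the
 * channel.  The "sc" softc is hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = 1;
 *	mutex_exit(&sc->sc_lock);
 *	wakeup(&sc->sc_ready);
 */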

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	/* XXX Only do this for timeshared threads. */
	l->l_priority = 0;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_priority = l->l_usrpri;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current LWP was running,
 * and update l_rtime accordingly - unless it is an idle LWP.
 */
void
updatertime(lwp_t *l, const struct timeval *tv)
{
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	u = l->l_rtime.tv_usec + (tv->tv_usec - l->l_stime.tv_usec);
	s = l->l_rtime.tv_sec + (tv->tv_sec - l->l_stime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;
}
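
/*
 * Worked example (editor's addition): if l_rtime is (2s, 900000us),
 * l_stime is (10s, 300000us) and *tv is (11s, 800000us), then
 * u = 900000 + (800000 - 300000) = 1400000 and s = 2 + 1 = 3;
 * the carry normalizes this to l_rtime = (4s, 400000us).
 */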

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct cpu_info *ci;
	struct timeval tv;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	microtime(&tv);

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	ci = l->l_cpu;
	KDASSERT(ci == curcpu());

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_flag & LW_TIMEINTR) != 0)
				updatertime(l, &tv);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_flag |= LW_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &tv);
	}

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run the CPU next.
	 * If no LWP is runnable, switch to the idle LWP.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = ci;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = ci->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		/*
		 * Only clear want_resched if there are no
		 * pending (slow) software interrupts.
		 */
		ci->ci_want_resched = ci->ci_data.cpu_softints;
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
	}

	/* Update the new LWP's start time while it is still locked. */
	if (!returning) {
		newl->l_stime = tv;
		/*
		 * XXX The following may be done unlocked if newl != NULL
		 * above.
		 */
		newl->l_priority = newl->l_usrpri;
	}

	spc->spc_curpriority = newl->l_usrpri;

	if (l != newl) {
		struct lwp *prevlwp;

		/*
		 * If the old LWP has been moved to a run queue above,
		 * drop the general purpose LWP lock: it's now locked
		 * by the scheduler lock.
		 *
		 * Otherwise, drop the scheduler lock.  We're done with
		 * the run queues for now.
		 */
		if (l->l_mutex == spc->spc_mutex) {
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			mutex_spin_exit(spc->spc_mutex);
		}

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/* Switch to the new LWP.. */
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		prevlwp = cpu_switchto(l, newl, returning);

		/*
		 * .. we have switched away and are now back so we must
		 * be the new curlwp.  prevlwp is who we replaced.
		 */
		if (prevlwp != NULL) {
			curcpu()->ci_mtx_oldspl = oldspl;
			lwp_unlock(prevlwp);
		} else {
			splx(oldspl);
		}

		/* Restore VM context. */
		pmap_activate(l);
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_cpu == curcpu());

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again.  We might be on a new CPU now, so don't
	 * use the cached schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KASSERT(curlwp == l);
	KDASSERT(l->l_cpu == curcpu());
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		spc_lock(l->l_cpu);
		lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	}

	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks that
			 * they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	pri_t pri;

#ifndef __HAVE_FAST_SOFTINTS
	/*
	 * Hack: if a user thread is being used to run a soft
	 * interrupt, we need to boost the priority here.
	 */
	if ((l->l_pflag & LP_INTR) != 0 && l->l_priority < PRI_KERNEL_RT)
		return softint_kpri(l);
#endif

	/*
	 * Scale user priorities (0 -> 63) up to kernel priorities
	 * in the range (64 -> 95).  This makes assumptions about
	 * the priority space and so should be kept in sync with
	 * param.h.
	 */
	if ((pri = l->l_usrpri) >= PRI_KERNEL)
		return pri;

	return (pri >> 1) + PRI_KERNEL;
}
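
/*
 * Worked example (editor's addition), assuming PRI_KERNEL is 64 as the
 * comment above implies: a user priority of 0 maps to (0 >> 1) + 64 = 64,
 * 40 maps to (40 >> 1) + 64 = 84, and 63 maps to (63 >> 1) + 64 = 95,
 * so the user range 0..63 folds into the kernel range 64..95.
 */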

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = l->l_cpu;
	if (pri > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	l->l_usrpri = pri;
	if (l->l_priority >= PRI_KERNEL)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)
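
/*
 * Worked example (editor's addition): ccpu is exp(-1/20) in FSCALE fixed
 * point, and sched_pstats() below multiplies each pctcpu estimate by it
 * once per second.  After 60 idle seconds an estimate has been scaled by
 * exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of the old value has
 * decayed away, which is where the "decay 95% in 60 seconds" comment
 * above comes from.
 */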

/*
 * sched_pstats:
 *
 * Update process statistics and check CPU resource allocation.
 * Call scheduler-specific hook to eventually adjust process/LWP
 * priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			sched_pstats_hook(l);
			lwp_unlock(l);

			/*
			 * l_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				        (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) l->l_cpticks)
				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}

		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
#ifdef SCHED_4BSD
		/*
		 * XXX: Workaround - belongs to sched_4bsd.c
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp <= 1) {
			extern fixpt_t decay_cpu(fixpt_t, fixpt_t);

			fixpt_t loadfac = 2 * (averunnable.ldavg[0]);
			p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		}
#endif
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			/* XXXAD */
			mutex_enter(&proclist_mutex);
			psignal(p, sig);
			mutex_exit(&proclist_mutex);
		}
	}
	mutex_exit(&proclist_lock);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}

void
sched_init(void)
{

	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}
    979