/*	$NetBSD: kern_synch.c,v 1.211.2.4 2007/12/28 21:40:48 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.211.2.4 2007/12/28 21:40:48 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

callout_t sched_pstats_ch;
unsigned int sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning to the caller unless the PNORELOCK flag is
 * specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
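
/*
 * Illustrative sketch (not part of this file): the classic sleep/wakeup
 * pattern using the obsolete interface above.  "example_slock" and
 * "example_flag" are hypothetical, and example_slock is assumed to have
 * been set up with simple_lock_init() elsewhere.  The condition is
 * re-tested after ltsleep() returns, since wakeup() wakes every sleeper
 * on the channel.
 */
#if 0
static volatile struct simplelock example_slock;
static volatile int example_flag;

static int
example_wait(void)
{
	int error = 0;

	simple_lock(&example_slock);
	while (example_flag == 0) {
		/* Drops example_slock while asleep; relocks it on return. */
		error = ltsleep(&example_flag, PCATCH, "exwait", 0,
		    &example_slock);
		if (error != 0)
			break;
	}
	simple_unlock(&example_slock);
	return error;
}

static void
example_post(void)
{

	simple_lock(&example_slock);
	example_flag = 1;
	simple_unlock(&example_slock);
	wakeup(&example_flag);	/* make every sleeper on the channel runnable */
}
#endif	/* example */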

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
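
/*
 * Illustrative sketch (not part of this file): mtsleep() follows the
 * same pattern with a kmutex(9) in place of a simplelock.  The
 * "example_mtx"/"example_count" pair is hypothetical; the mutex is
 * assumed to have been set up with mutex_init() elsewhere.
 */
#if 0
static kmutex_t example_mtx;
static int example_count;

static int
example_consume(void)
{
	int error;

	mutex_enter(&example_mtx);
	while (example_count == 0) {
		/* example_mtx is dropped while asleep and re-acquired. */
		error = mtsleep(&example_count, PCATCH, "excons", 0,
		    &example_mtx);
		if (error != 0) {
			mutex_exit(&example_mtx);
			return error;
		}
	}
	example_count--;
	mutex_exit(&example_mtx);
	return 0;
}
#endif	/* example */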

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
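
/*
 * Illustrative sketch (not part of this file): kpause() sleeps on the
 * LWP itself, so no matching wakeup() is needed; it is simply a delay.
 * The wmesg and interval below are hypothetical.
 */
#if 0
static void
example_backoff(void)
{

	/* Sleep for roughly 100ms, not interruptible by signals. */
	(void)kpause("expause", false, mstohz(100), NULL);
}
#endif	/* example */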

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	if (l->l_class == SCHED_OTHER) {
		/*
		 * Only for timeshared threads.  It will be reset
		 * by the scheduler in due course.
		 */
		l->l_priority = 0;
	}
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}
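
/*
 * Illustrative sketch (not part of this file): a long-running kernel
 * thread can call yield() between units of work to let other runnable
 * LWPs onto the CPU; preempt() is the involuntary counterpart invoked
 * when the scheduler wants the CPU back.  example_do_one_item() is
 * hypothetical.
 */
#if 0
extern bool example_do_one_item(void);

static void
example_worker(void *cookie)
{

	while (example_do_one_item()) {
		/* Voluntarily give up the CPU between work items. */
		yield();
	}
}
#endif	/* example */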

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}
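
/*
 * Illustrative sketch (not part of this file): the same bintime(9)
 * idiom as updatertime() above, accumulating an interval (now - start)
 * into a running total without an intermediate subtraction temporary.
 */
#if 0
static void
example_accumulate(struct bintime *total, const struct bintime *start)
{
	struct bintime now;

	binuptime(&now);	/* monotonic time since boot */
	/* total += now - start */
	bintime_add(total, &now);
	bintime_sub(total, start);
}
#endif	/* example */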

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct cpu_info *ci;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	binuptime(&bt);

	KDASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither has been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_flag & LW_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_flag |= LW_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &bt);
	}

	/*
	 * If we are still on the CPU and have gotten this far, then we
	 * must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and that
	 * the new thread will be unlocked after its LWP lock is set.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = ci;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = ci->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		/*
		 * Only clear want_resched if there are no
		 * pending (slow) software interrupts.
		 */
		ci->ci_want_resched = ci->ci_data.cpu_softints;
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
		spc->spc_curpriority = lwp_eprio(newl);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked. */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that a context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (newl->l_ctxswtch != 0) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/* Switch to the new LWP. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have a new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}
		splx(oldspl);

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);

		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_cpu == ci);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run on, and set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
		lwp_lock(l);
	}
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	/*
	 * If the thread is swapped out, wake the swapper to bring it back
	 * in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = l->l_cpu;
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)

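/*
 * Illustrative sketch (not part of this file): why ccpu = exp(-1/20)
 * decays ~95% in 60 seconds.  sched_pstats() runs once per second, so
 * after 60 updates the old contribution has been scaled by
 * exp(-60/20) = exp(-3) ~= 0.0498, i.e. only ~5% of it remains.  The
 * loop below shows the same decay in the kernel's fixed-point
 * representation (FSCALE == 1 << FSHIFT).
 */
#if 0
static void
example_ccpu_decay(void)
{
	fixpt_t pct = FSCALE;	/* start at "100%" */
	int i;

	for (i = 0; i < 60; i++)
		pct = (pct * ccpu) >> FSHIFT;
	/* pct is now roughly FSCALE / 20, i.e. ~5% of its old value. */
}
#endif	/* example */
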
/*
 * sched_pstats:
 *
 * Update process statistics and check CPU resource allocation.
 * Call scheduler-specific hook to eventually adjust process/LWP
 * priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_pstats_hook(l);
			lwp_unlock(l);

			/*
			 * p_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				        (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) l->l_cpticks)
				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			mutex_enter(&proclist_mutex);
			psignal(p, sig);
			mutex_exit(&proclist_mutex);
		}
	}
	mutex_exit(&proclist_lock);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}
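
/*
 * Illustrative sketch (not part of this file): how the RLIMIT_CPU check
 * in sched_pstats() behaves over time.  With a hypothetical soft limit
 * of 10s and a hard limit of 20s, a CPU-bound process gets SIGXCPU at
 * 10s and 15s of accumulated run time (each delivery advances the soft
 * limit by 5s) and SIGKILL once the 20s hard limit is reached.
 */
#if 0
static int
example_cpulimit_signal(struct rlimit *rlim, long runtm)
{

	if (runtm < rlim->rlim_cur)
		return 0;			/* under the soft limit */
	if (runtm >= rlim->rlim_max)
		return SIGKILL;			/* over the hard limit */
	if (rlim->rlim_cur < rlim->rlim_max)
		rlim->rlim_cur += 5;		/* next warning in 5s */
	return SIGXCPU;
}
#endif	/* example */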

void
sched_init(void)
{

	cv_init(&lbolt, "lbolt");
	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}