/*	$NetBSD: kern_sleepq.c,v 1.1.2.2 2006/10/20 20:41:26 ad Exp $	*/

/*-
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
 * interfaces.
 */
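
/*
 * Illustrative sketch (editor's addition, not built): a minimal waiter
 * using the general-purpose sleep table defined below.  The function name
 * and wmesg are hypothetical; sleeptab, SLEEPTAB_HASH, sleepq_enter() and
 * sleepq_block() are the interfaces in this file.
 */
#if 0
static int
example_wait(wchan_t wchan, int timo)
{
	sleepq_t *sq;

	/* Hash the wait channel to a queue and lock that queue. */
	sq = &sleeptab[SLEEPTAB_HASH(wchan)];
	mutex_enter(sq->sq_mutex);

	/* Queue the current LWP, then block until awoken or timed out. */
	sleepq_enter(sq, PWAIT, wchan, "example", timo, 1);
	return sleepq_block(sq, timo);
}
#endif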

#include "opt_multiprocessor.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.1.2.2 2006/10/20 20:41:26 ad Exp $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/sleepq.h>

int	sleepq_sigtoerror(struct lwp *, int);
void	updatepri(struct lwp *);
void	sa_awaken(struct lwp *);

sleepq_t	sleeptab[SLEEPTAB_HASH_SIZE];
#ifdef MULTIPROCESSOR
kmutex_t	sleeptab_mutexes[SLEEPTAB_HASH_SIZE];
#else
kmutex_t	sleeptab_mutex;
#endif

/*
 * sleeptab_init:
 *
 *	Initialize the general-purpose sleep queues.
 */
void
sleeptab_init(void)
{
	sleepq_t *sq;
	int i;

#ifndef MULTIPROCESSOR
	mutex_init(&sleeptab_mutex, MUTEX_SPIN, IPL_SCHED);
#endif

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		sq = &sleeptab[i];
#ifdef MULTIPROCESSOR
		mutex_init(&sleeptab_mutexes[i], MUTEX_SPIN, IPL_SCHED);
		sleepq_init(sq, &sleeptab_mutexes[i]);
#else
		sleepq_init(sq, &sleeptab_mutex);
#endif
	}
}

/*
 * sleepq_init:
 *
 *	Prepare a sleep queue for use.
 */
void
sleepq_init(sleepq_t *sq, kmutex_t *mtx)
{

	sq->sq_waiters = 0;
	sq->sq_mutex = mtx;
	TAILQ_INIT(&sq->sq_queue);
}
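
/*
 * Illustrative sketch (editor's addition, not built): a subsystem owning a
 * private sleep queue instead of using the shared sleep table.  The
 * "widget" structure is hypothetical; the initialization mirrors
 * sleeptab_init() above.
 */
#if 0
struct widget {
	kmutex_t	w_sqmutex;
	sleepq_t	w_sleepq;
};

static void
widget_init(struct widget *w)
{

	mutex_init(&w->w_sqmutex, MUTEX_SPIN, IPL_SCHED);
	sleepq_init(&w->w_sleepq, &w->w_sqmutex);
}
#endif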

/*
 * sleepq_remove:
 *
 *	Remove an LWP from a sleep queue and wake it up.  Return non-zero if
 *	the LWP is swapped out; if so the caller needs to awaken the swapper
 *	to bring the LWP into memory.
 */
int
sleepq_remove(sleepq_t *sq, struct lwp *l)
{

	LOCK_ASSERT(lwp_locked(l, sq->sq_mutex));
	KASSERT(sq->sq_waiters > 0);

	l->l_wchan = NULL;
	l->l_flag &= ~L_SINTR;

	sq->sq_waiters--;
	TAILQ_REMOVE(&sq->sq_queue, l, l_sleepq);
	KASSERT((sq->sq_waiters == 0) ==
	    (TAILQ_FIRST(&sq->sq_queue) == NULL));

	/*
	 * If it's not sleeping, the LWP must have been suspended.  Let
	 * whoever is holding it stopped set it running again.
	 */
	if (l->l_stat != LSSLEEP) {
		KASSERT(l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED);
		l->l_slptime = 0;
		return 0;
	}

	if (l == curlwp) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_setlock(l, &l->l_cpu->ci_sched_mutex);
		return 0;
	}

	l->l_stat = LSRUN;

	if (l->l_proc->p_sa)
		sa_awaken(l);
	if (l->l_slptime > 1)
		updatepri(l);	/* consumes l_slptime; clear it afterwards */
	l->l_slptime = 0;

	/*
	 * Try to set the LWP running, and swap in its new mutex.  Once
	 * we've done the swap, we can't touch the LWP again.
	 */
	if ((l->l_flag & L_INMEM) != 0) {
		/*
		 * Try to get the last CPU that ran this LWP to pick it up.
		 */
		setrunqueue(l);
		lwp_setlock(l, &sched_mutex);
		cpu_need_resched(l->l_cpu);
		return 0;
	}

	lwp_setlock(l, &lwp_mutex);
	return 1;
}

/*
 * sleepq_enter:
 *
 *	Enter an LWP into the sleep queue and prepare for sleep.  Any
 *	interlocking step, such as releasing a mutex or checking for
 *	signals, may safely be done by the caller once it is on the
 *	sleep queue.
 */
void
sleepq_enter(sleepq_t *sq, int pri, wchan_t wchan, const char *wmesg, int timo,
	     int catch)
{
	struct lwp *l = curlwp;

	LOCK_ASSERT(mutex_owned(sq->sq_mutex));

#ifdef KTRACE
	if (KTRPOINT(l->l_proc, KTR_CSW))
		ktrcsw(l, 1, 0);
#endif

	sq->sq_waiters++;
	TAILQ_INSERT_TAIL(&sq->sq_queue, l, l_sleepq);

	/*
	 * Acquire the per-LWP mutex.
	 */
	lwp_lock(l);

	KASSERT(l->l_wchan == NULL);

	l->l_wchan = wchan;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = pri & PRIMASK;
	l->l_flag &= ~L_CANCELLED;
	if (catch)
		l->l_flag |= L_SINTR;
	if (l->l_stat == LSONPROC)
		l->l_stat = LSSLEEP;
	l->l_nvcsw++;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, sleepq_timeout, l);

	/*
	 * The LWP is now on the sleep queue.  Release its old mutex and
	 * lend it ours for the duration of the sleep.
	 */
	lwp_swaplock(l, sq->sq_mutex);
}
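
/*
 * Illustrative sketch (editor's addition, not built): dropping a subsystem
 * interlock between sleepq_enter() and sleepq_block(), as the comment above
 * permits.  The "widget" structure and its w_mutex interlock are
 * hypothetical.
 */
#if 0
static int
widget_wait(struct widget *w, wchan_t wchan)
{
	sleepq_t *sq = &w->w_sleepq;

	/* Called with w->w_mutex (the interlock) held. */
	mutex_enter(sq->sq_mutex);
	sleepq_enter(sq, PWAIT, wchan, "widget", 0, 1);

	/* Now on the queue: safe to release the interlock before blocking. */
	mutex_exit(&w->w_mutex);

	return sleepq_block(sq, 0);
}
#endif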

/*
 * sleepq_block:
 *
 *	The calling LWP has been entered into the sleep queue by
 *	sleepq_enter(), and now wants to block.  sleepq_block() may return
 *	early under exceptional conditions, for example if the LWP's process
 *	is exiting.  Once sleepq_enter() has been called, sleepq_block()
 *	must always follow.
 */
int
sleepq_block(sleepq_t *sq, int timo)
{
	int error, flag, expired, sig;
	struct lwp *l = curlwp;
	struct proc *p;

	LOCK_ASSERT(lwp_locked(l, sq->sq_mutex));

	flag = l->l_flag;
	error = 0;

	/*
	 * If sleeping interruptibly, check for pending signals, process
	 * exit, or core dump events.
	 */
	if ((flag & L_SINTR) != 0) {
		while ((l->l_flag & L_PENDSIG) != 0 && error == 0) {
			lwp_unlock(l);
			p = l->l_proc;
			mutex_enter(&p->p_smutex);
			if ((sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(&p->p_smutex);
			lwp_lock(l);
		}

		if (error == 0 && (l->l_flag & (L_WEXIT | L_WCORE)) != 0)
			error = EINTR;

		if (error != 0) {
			/*
			 * If the LWP is on a sleep queue and we remove it,
			 * we will change its mutex, so we need to unlock
			 * the sleep queue.  If it's off the sleep queue
			 * already, then unlock the LWP directly.
			 */
			if (l->l_wchan != NULL) {
				mutex_enter(&sched_mutex);
				(void)sleepq_remove(sq, l);
				mutex_exit(&sched_mutex);
				mutex_exit(sq->sq_mutex);
			} else
				lwp_unlock(l);

			goto out;
		}
	}

	if (l->l_stat == LSONPROC) {
		/*
		 * We may have decided not to switch away, and so removed
		 * ourselves from the sleep queue.
		 */
		lwp_unlock(l);
	} else if ((flag & L_SA) != 0) {
		sa_switch(l, sadata_upcall_alloc(0), SA_UPCALL_BLOCKED);
		/* XXXAD verify sa_switch restores SPL. */
	} else {
		mi_switch(l, NULL);

		l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;
	}

	KASSERT(l->l_wchan == NULL);

	if (timo) {
		/*
		 * Even if the callout appears to have fired, we need to
		 * stop it in order to synchronise with other CPUs.
		 */
		expired = callout_expired(&l->l_tsleep_ch);
		callout_stop(&l->l_tsleep_ch);
		if (expired) {
			error = EWOULDBLOCK;
			goto out;
		}
	}

	if ((flag & L_SINTR) != 0) {
		if ((l->l_flag & (L_CANCELLED | L_WEXIT | L_WCORE)) != 0)
			error = EINTR;
		else if ((l->l_flag & L_PENDSIG) != 0) {
			p = l->l_proc;
			mutex_enter(&p->p_smutex);
			if ((sig = issignal(l)) != 0)
				error = sleepq_sigtoerror(l, sig);
			mutex_exit(&p->p_smutex);
		}
	}

 out:
#ifdef KTRACE
	if (KTRPOINT(l->l_proc, KTR_CSW))
		ktrcsw(l, 0, 0);
#endif
	return error;
}
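
/*
 * Illustrative sketch (editor's addition, not built): interpreting
 * sleepq_block()'s return value.  Zero means a normal wakeup; EWOULDBLOCK
 * means the timeout fired; EINTR and ERESTART come from signals via
 * sleepq_sigtoerror().
 */
#if 0
	error = sleepq_block(sq, timo);
	switch (error) {
	case 0:
		/* Awoken by sleepq_wakeone() or sleepq_wakeall(). */
		break;
	case EWOULDBLOCK:
		/* timo ticks elapsed before any wakeup arrived. */
		break;
	case EINTR:
	case ERESTART:
		/* Interrupted; unwind so the signal can be delivered. */
		break;
	}
#endif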

/*
 * sleepq_wakeone:
 *
 *	Remove one LWP from the sleep queue and wake it.  We search among
 *	the highest priority LWPs waiting on a single wait channel, and pick
 *	the longest waiting one.
 */
void
sleepq_wakeone(sleepq_t *sq, wchan_t wchan)
{
	struct lwp *l, *bl;
	int bpri, swapin;

	LOCK_ASSERT(mutex_owned(sq->sq_mutex));

	swapin = 0;
	bpri = MAXPRI;
	bl = NULL;

	/*
	 * Entries are queued at the tail, so the first match at each
	 * priority is the longest waiting: only replace the best LWP
	 * found so far on a strictly better priority.
	 */
	TAILQ_FOREACH(l, &sq->sq_queue, l_sleepq) {
		if (l->l_wchan != wchan)
			continue;
		if (bl == NULL || l->l_priority < bpri) {
			bl = l;
			bpri = l->l_priority;
		}
	}

	if (bl != NULL) {
		mutex_enter(&sched_mutex);
		swapin = sleepq_remove(sq, bl);
		mutex_exit(&sched_mutex);
	}

	mutex_exit(sq->sq_mutex);

	if (swapin)
		wakeup(&proc0);
}
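
/*
 * Illustrative sketch (editor's addition, not built): the wakeup side.
 * Note that sleepq_wakeone() and sleepq_wakeall() release sq->sq_mutex
 * themselves, so the caller only acquires it.  The function name is
 * hypothetical.
 */
#if 0
static void
example_wakeup(wchan_t wchan)
{
	sleepq_t *sq;

	sq = &sleeptab[SLEEPTAB_HASH(wchan)];
	mutex_enter(sq->sq_mutex);
	sleepq_wakeone(sq, wchan);	/* drops sq->sq_mutex */
}
#endif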

/*
 * sleepq_wakeall:
 *
 *	Wake all LWPs blocked on a single wait channel.
 */
void
sleepq_wakeall(sleepq_t *sq, wchan_t wchan, u_int expected)
{
	struct lwp *l, *next;
	int swapin = 0;

	LOCK_ASSERT(mutex_owned(sq->sq_mutex));

	mutex_enter(&sched_mutex);
	for (l = TAILQ_FIRST(&sq->sq_queue); l != NULL; l = next) {
		next = TAILQ_NEXT(l, l_sleepq);
		if (l->l_wchan != wchan)
			continue;
		swapin |= sleepq_remove(sq, l);
		if (--expected == 0)
			break;
	}
	mutex_exit(&sched_mutex);

	LOCK_ASSERT(mutex_owned(sq->sq_mutex));
	mutex_exit(sq->sq_mutex);

	/*
	 * If there are newly awakened threads that need to be swapped in,
	 * then kick the swapper into action.
	 */
	if (swapin)
		wakeup(&proc0);
}

/*
 * sleepq_unsleep:
 *
 *	Remove an LWP from its sleep queue and set it runnable again.
 *	sleepq_unsleep() is called with the LWP's mutex held, and will
 *	always release it.
 */
void
sleepq_unsleep(struct lwp *l)
{
	sleepq_t *sq;
	int swapin;

	KASSERT(l->l_wchan != NULL);
	sq = &sleeptab[SLEEPTAB_HASH(l->l_wchan)];
	KASSERT(l->l_mutex == sq->sq_mutex);

	mutex_enter(&sched_mutex);
	swapin = sleepq_remove(sq, l);
	mutex_exit(&sched_mutex);

	LOCK_ASSERT(mutex_owned(sq->sq_mutex));
	mutex_exit(sq->sq_mutex);

	if (swapin)
		wakeup(&proc0);
}

/*
 * sleepq_timeout:
 *
 *	Entered via the callout(9) subsystem to time out an LWP that is on a
 *	sleep queue.
 */
void
sleepq_timeout(void *arg)
{
	struct lwp *l = arg;

	/*
	 * Lock the LWP.  Assuming it's still on the sleep queue, its
	 * current mutex will also be the sleep queue mutex.
	 */
	lwp_lock(l);

	if (l->l_wchan == NULL) {
		/* Somebody beat us to it. */
		lwp_unlock(l);
		return;
	}

	sleepq_unsleep(l);
}

/*
 * sleepq_sigtoerror:
 *
 *	Given a signal number, interpret and return an error code.
 */
int
sleepq_sigtoerror(struct lwp *l, int sig)
{
	struct proc *p;
	int error;

	/*
	 * If the signal's handler is not marked SA_RESTART, don't let
	 * the syscall restart.
	 */
	p = l->l_proc;
	mutex_enter(&p->p_smutex);
	if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
		error = EINTR;
	else
		error = ERESTART;
	mutex_exit(&p->p_smutex);

	return error;
}

/*
 * sleepq_abort:
 *
 *	After a panic or during autoconfiguration, lower the interrupt
 *	priority level to give pending interrupts a chance to run, and
 *	then return.  Called if sleepq_dontsleep() returns non-zero, and
 *	always returns zero.
 */
int
sleepq_abort(kmutex_t *mtx, int unlock)
{
	extern int safepri;
	int s;

	s = splhigh();
	splx(safepri);
	splx(s);
	if (mtx != NULL && unlock != 0)
		mutex_exit(mtx);

	return 0;
}
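
/*
 * Illustrative sketch (editor's addition, not built): the guard pattern
 * described above, at the top of a hypothetical sleep routine.  It assumes
 * the sleepq_dontsleep() predicate mentioned in the comment takes the
 * current LWP; "mtx" and "unlock" mirror sleepq_abort()'s arguments.
 */
#if 0
	if (sleepq_dontsleep(curlwp)) {
		/* Cold boot or panic: do not sleep, just run interrupts. */
		return sleepq_abort(mtx, unlock);
	}
#endif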