Home | History | Annotate | Line # | Download | only in rumpkern
sleepq.c revision 1.2.6.4
      1  1.2.6.4  yamt /*	$NetBSD: sleepq.c,v 1.2.6.4 2010/08/11 22:55:07 yamt Exp $	*/
      2  1.2.6.2  yamt 
      3  1.2.6.2  yamt /*
      4  1.2.6.2  yamt  * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
      5  1.2.6.2  yamt  *
      6  1.2.6.2  yamt  * Redistribution and use in source and binary forms, with or without
      7  1.2.6.2  yamt  * modification, are permitted provided that the following conditions
      8  1.2.6.2  yamt  * are met:
      9  1.2.6.2  yamt  * 1. Redistributions of source code must retain the above copyright
     10  1.2.6.2  yamt  *    notice, this list of conditions and the following disclaimer.
     11  1.2.6.2  yamt  * 2. Redistributions in binary form must reproduce the above copyright
     12  1.2.6.2  yamt  *    notice, this list of conditions and the following disclaimer in the
     13  1.2.6.2  yamt  *    documentation and/or other materials provided with the distribution.
     14  1.2.6.2  yamt  *
     15  1.2.6.2  yamt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     16  1.2.6.2  yamt  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     17  1.2.6.2  yamt  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     18  1.2.6.2  yamt  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19  1.2.6.2  yamt  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20  1.2.6.2  yamt  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     21  1.2.6.2  yamt  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  1.2.6.2  yamt  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23  1.2.6.2  yamt  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24  1.2.6.2  yamt  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25  1.2.6.2  yamt  * SUCH DAMAGE.
     26  1.2.6.2  yamt  */
     27  1.2.6.2  yamt 
     28  1.2.6.2  yamt #include <sys/cdefs.h>
     29  1.2.6.4  yamt __KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.2.6.4 2010/08/11 22:55:07 yamt Exp $");
     30  1.2.6.2  yamt 
     31  1.2.6.2  yamt #include <sys/param.h>
     32  1.2.6.2  yamt #include <sys/condvar.h>
     33  1.2.6.2  yamt #include <sys/mutex.h>
     34  1.2.6.3  yamt #include <sys/once.h>
     35  1.2.6.2  yamt #include <sys/queue.h>
     36  1.2.6.2  yamt #include <sys/sleepq.h>
     37  1.2.6.2  yamt #include <sys/syncobj.h>
     38  1.2.6.2  yamt 
     39  1.2.6.3  yamt #include "rump_private.h"
     40  1.2.6.3  yamt 
     41  1.2.6.2  yamt /*
     42  1.2.6.2  yamt  * Flimsy and minimalistic sleepq implementation.  This is implemented
     43  1.2.6.2  yamt  * only for the use of callouts in kern_timeout.c.  locking etc is
     44  1.2.6.2  yamt  * completely incorrect, horrible, etc etc etc.
     45  1.2.6.2  yamt  */
     46  1.2.6.2  yamt 
/* Sync object handed to sleepq users; not referenced elsewhere in this file. */
syncobj_t sleep_syncobj;
/* Single global condvar that all sleepq sleepers block on. */
static kcondvar_t sq_cv;
     49  1.2.6.3  yamt 
     50  1.2.6.3  yamt static int
     51  1.2.6.3  yamt sqinit1(void)
     52  1.2.6.3  yamt {
     53  1.2.6.3  yamt 
     54  1.2.6.3  yamt 	cv_init(&sq_cv, "sleepq");
     55  1.2.6.3  yamt 
     56  1.2.6.3  yamt 	return 0;
     57  1.2.6.3  yamt }
     58  1.2.6.2  yamt 
     59  1.2.6.2  yamt void
     60  1.2.6.2  yamt sleepq_init(sleepq_t *sq)
     61  1.2.6.2  yamt {
     62  1.2.6.3  yamt 	ONCE_DECL(sqctl);
     63  1.2.6.2  yamt 
     64  1.2.6.3  yamt 	RUN_ONCE(&sqctl, sqinit1);
     65  1.2.6.2  yamt 
     66  1.2.6.3  yamt 	TAILQ_INIT(sq);
     67  1.2.6.2  yamt }
     68  1.2.6.2  yamt 
     69  1.2.6.2  yamt void
     70  1.2.6.2  yamt sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob)
     71  1.2.6.2  yamt {
     72  1.2.6.2  yamt 	struct lwp *l = curlwp;
     73  1.2.6.2  yamt 
     74  1.2.6.2  yamt 	l->l_wchan = wc;
     75  1.2.6.3  yamt 	l->l_sleepq = sq;
     76  1.2.6.2  yamt 	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
     77  1.2.6.2  yamt }
     78  1.2.6.2  yamt 
/*
 * Block the current lwp until its sleep ends: either l_wchan is
 * cleared by sleepq_wake()/sleepq_unsleep(), or timo ticks elapse.
 * Returns 0 on normal wakeup, EWOULDBLOCK on timeout.  The "catch"
 * (signal interruption) argument is ignored in this implementation.
 */
int
sleepq_block(int timo, bool catch)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;
	int biglocks = l->l_biglocks;

	while (l->l_wchan) {
		/* re-set l_mutex each pass: sleepq_wake() NULLs it on wakeup */
		l->l_mutex = mp;
		if ((error=cv_timedwait(&sq_cv, mp, timo)) == EWOULDBLOCK) {
			/* timed out: dequeue ourselves; loop then terminates */
			TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
			l->l_wchan = NULL;
		}
	}
	mutex_spin_exit(mp);

	/* reacquire any kernel biglock recursion held before sleeping */
	if (biglocks)
		KERNEL_LOCK(biglocks, curlwp);

	return error;
}
    101  1.2.6.2  yamt 
    102  1.2.6.2  yamt lwp_t *
    103  1.2.6.2  yamt sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
    104  1.2.6.2  yamt {
    105  1.2.6.3  yamt 	struct lwp *l, *l_next;
    106  1.2.6.2  yamt 	bool found = false;
    107  1.2.6.2  yamt 
    108  1.2.6.3  yamt 	if (__predict_false(expected != -1))
    109  1.2.6.3  yamt 		panic("sleepq_wake: \"expected\" not supported");
    110  1.2.6.3  yamt 
    111  1.2.6.3  yamt 	for (l = TAILQ_FIRST(sq); l; l = l_next) {
    112  1.2.6.3  yamt 		l_next = TAILQ_NEXT(l, l_sleepchain);
    113  1.2.6.2  yamt 		if (l->l_wchan == wchan) {
    114  1.2.6.2  yamt 			found = true;
    115  1.2.6.2  yamt 			l->l_wchan = NULL;
    116  1.2.6.4  yamt 			l->l_mutex = NULL;
    117  1.2.6.3  yamt 			TAILQ_REMOVE(sq, l, l_sleepchain);
    118  1.2.6.2  yamt 		}
    119  1.2.6.2  yamt 	}
    120  1.2.6.2  yamt 	if (found)
    121  1.2.6.2  yamt 		cv_broadcast(&sq_cv);
    122  1.2.6.2  yamt 
    123  1.2.6.2  yamt 	mutex_spin_exit(mp);
    124  1.2.6.2  yamt 	return NULL;
    125  1.2.6.2  yamt }
    126  1.2.6.2  yamt 
    127  1.2.6.3  yamt void
    128  1.2.6.3  yamt sleepq_unsleep(struct lwp *l, bool cleanup)
    129  1.2.6.3  yamt {
    130  1.2.6.3  yamt 
    131  1.2.6.3  yamt 	l->l_wchan = NULL;
    132  1.2.6.4  yamt 	l->l_mutex = NULL;
    133  1.2.6.3  yamt 	TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
    134  1.2.6.3  yamt 	cv_broadcast(&sq_cv);
    135  1.2.6.3  yamt 
    136  1.2.6.3  yamt 	if (cleanup) {
    137  1.2.6.3  yamt 		mutex_spin_exit(l->l_mutex);
    138  1.2.6.3  yamt 	}
    139  1.2.6.3  yamt }
    140  1.2.6.3  yamt 
/*
 * The host thread scheduler handles priorities, therefore no action
 * is needed here.  (maybe do something if we're desperate?)
 */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}
    150  1.2.6.3  yamt 
/* Priority lending is likewise left to the host scheduler: no-op. */
void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}
    156  1.2.6.3  yamt 
/*
 * Report that a wait channel has no owning lwp (used by sync objects
 * without priority-inheritance semantics); always returns NULL.
 */
struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}
    163  1.2.6.3  yamt 
    164  1.2.6.2  yamt /*
    165  1.2.6.3  yamt  * XXX: used only by callout, therefore here.  should try to use
    166  1.2.6.3  yamt  * one in kern_lwp directly.
    167  1.2.6.2  yamt  */
    168  1.2.6.2  yamt kmutex_t *
    169  1.2.6.2  yamt lwp_lock_retry(struct lwp *l, kmutex_t *old)
    170  1.2.6.2  yamt {
    171  1.2.6.2  yamt 
    172  1.2.6.3  yamt 	while (l->l_mutex != old) {
    173  1.2.6.3  yamt 		mutex_spin_exit(old);
    174  1.2.6.3  yamt 		old = l->l_mutex;
    175  1.2.6.3  yamt 		mutex_spin_enter(old);
    176  1.2.6.3  yamt 	}
    177  1.2.6.2  yamt 	return old;
    178  1.2.6.2  yamt }
    179