sleepq.c revision 1.10
/*	$NetBSD: sleepq.c,v 1.10 2010/12/18 14:01:43 skrll Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.10 2010/12/18 14:01:43 skrll Exp $");

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/atomic.h>

#include "rump_private.h"

/*
 * Flimsy and minimalistic sleepq implementation.  This is implemented
 * only for the use of callouts in kern_timeout.c.  Locking etc. is
 * completely incorrect, horrible, etc. etc. etc.
 */

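/*
 * Rough usage sketch (illustrative only; "sq", "sq_lock" and "wchan"
 * are hypothetical names, and this is not lifted from kern_timeout.c).
 * A sleeper is assumed to hold a spin mutex and to have pointed
 * curlwp->l_mutex at it; sleepq_block() releases that mutex on return:
 *
 *	sleepq_init(&sq);
 *	...
 *	mutex_spin_enter(&sq_lock);
 *	curlwp->l_mutex = &sq_lock;
 *	sleepq_enqueue(&sq, wchan, "sketch", &sleep_syncobj);
 *	error = sleepq_block(hz, false);	(drops sq_lock)
 *
 * A waker takes the same mutex and passes it to sleepq_wake(), which
 * broadcasts to everything sleeping on the channel and drops the mutex:
 *
 *	mutex_spin_enter(&sq_lock);
 *	sleepq_wake(&sq, wchan, -1, &sq_lock);
 */
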
syncobj_t sleep_syncobj;
static kcondvar_t sq_cv;

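/*
 * One-time initialization: set up the single condvar shared by all
 * sleepers in this module.
 */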
static int
sqinit1(void)
{

	cv_init(&sq_cv, "sleepq");

	return 0;
}

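/*
 * Initialize a sleep queue head.  The first call also performs the
 * one-time condvar setup via RUN_ONCE().
 */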
void
sleepq_init(sleepq_t *sq)
{
	ONCE_DECL(sqctl);

	RUN_ONCE(&sqctl, sqinit1);

	TAILQ_INIT(sq);
}

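/*
 * Put curlwp on the sleep queue and record the wait channel it is
 * sleeping on.  The lwp does not actually sleep until sleepq_block().
 */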
void
sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob)
{
	struct lwp *l = curlwp;

	l->l_wchan = wc;
	l->l_sleepq = sq;
	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

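/*
 * Block until the wait channel is cleared by a wakeup or the timeout
 * expires.  Called with curlwp->l_mutex held; that mutex is released
 * before returning.  The "catch" argument (signals) is ignored here.
 */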
int
sleepq_block(int timo, bool catch)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;
	int biglocks = l->l_biglocks;

	while (l->l_wchan) {
		l->l_mutex = mp;
		if ((error = cv_timedwait(&sq_cv, mp, timo)) == EWOULDBLOCK) {
			TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
			l->l_wchan = NULL;
		}
	}
	mutex_spin_exit(mp);

	if (biglocks)
		KERNEL_LOCK(biglocks, curlwp);

	return error;
}

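/*
 * Wake every lwp on the queue that is sleeping on "wchan" and release
 * the queue mutex.  Only "wake all" (expected == -1) is supported.
 */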
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l, *l_next;
	bool found = false;

	if (__predict_false(expected != -1))
		panic("sleepq_wake: \"expected\" not supported");

	for (l = TAILQ_FIRST(sq); l; l = l_next) {
		l_next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
			l->l_mutex = NULL;
			TAILQ_REMOVE(sq, l, l_sleepchain);
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}

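/*
 * Pull an lwp off its sleep queue regardless of wait channel and let
 * it run again; if "cleanup" is requested, also release its mutex.
 */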
void
sleepq_unsleep(struct lwp *l, bool cleanup)
{
	kmutex_t *mp = l->l_mutex;

	l->l_wchan = NULL;
	l->l_mutex = NULL;
	TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
	cv_broadcast(&sq_cv);

	if (cleanup) {
		/* Use the mutex saved above; l_mutex was just cleared. */
		mutex_spin_exit(mp);
	}
}

/*
 * Thread scheduler handles priorities.  Therefore no action here.
 * (maybe do something if we're desperate?)
 */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}

void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}

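/*
 * Sync objects handled here have no owning lwp to hand off to.
 */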
struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}

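/*
 * Switch the lwp's lock to "new": publish the new mutex pointer (the
 * membar_exit() orders earlier stores before the update) and then
 * release the old mutex.
 */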
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	membar_exit();
	l->l_mutex = new;
	mutex_spin_exit(old);
}
    177