/*	$NetBSD: sleepq.c,v 1.14 2013/03/10 11:21:05 pooka Exp $	*/

/*
 * Copyright (c) 2008 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.14 2013/03/10 11:21:05 pooka Exp $");

#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/sleepq.h>
#include <sys/syncobj.h>
#include <sys/atomic.h>

#include "rump_private.h"

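/*
 * Rump kernel implementation of the sleep queue interface.  All
 * sleeping LWPs are multiplexed onto the single global condvar sq_cv:
 * a wakeup broadcasts it, and LWPs whose wait channel was not cleared
 * notice that in sleepq_block() and simply go back to sleep.
 */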
syncobj_t sleep_syncobj;
static kcondvar_t sq_cv;

static int
sqinit1(void)
{

	cv_init(&sq_cv, "sleepq");

	return 0;
}

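/*
 * Initialize a sleep queue head.  The global condvar is set up once,
 * the first time any sleep queue is initialized.
 */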
void
sleepq_init(sleepq_t *sq)
{
	static ONCE_DECL(sqctl);

	RUN_ONCE(&sqctl, sqinit1);

	TAILQ_INIT(sq);
}

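/*
 * Record the wait channel and wait message on the current LWP and
 * append it to the sleep queue.  Called with the sleep queue lock held.
 */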
void
sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob)
{
	struct lwp *l = curlwp;

	l->l_wchan = wc;
	l->l_wmesg = wmsg;
	l->l_sleepq = sq;
	TAILQ_INSERT_TAIL(sq, l, l_sleepchain);
}

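/*
 * Block the current LWP until a wakeup clears its wait channel, the
 * timeout expires (EWOULDBLOCK), or the sleep is interrupted (EINTR).
 * On timeout or interrupt the LWP removes itself from the sleep queue
 * and the error is returned.  The biglock count saved in l_biglocks is
 * reacquired before returning.  The catch argument is not used here.
 */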
int
sleepq_block(int timo, bool catch)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;
	int biglocks = l->l_biglocks;

	while (l->l_wchan) {
		l->l_mutex = mp; /* keep sleepq lock until woken up */
		error = cv_timedwait(&sq_cv, mp, timo);
		if (error == EWOULDBLOCK || error == EINTR) {
			if (l->l_wchan) {
				TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_wmesg = NULL;
			}
		}
	}
	mutex_spin_exit(mp);

	if (biglocks)
		KERNEL_LOCK(biglocks, curlwp);

	return error;
}

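/*
 * Wake the LWPs sleeping on wchan: remove every matching LWP from the
 * sleep queue and broadcast the shared condvar.  Waiters that were not
 * matched see their wait channel still set and go back to sleep.
 * Waking only an "expected" number of LWPs (anything other than -1,
 * i.e. all of them) is not supported.
 */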
lwp_t *
sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
{
	struct lwp *l, *l_next;
	bool found = false;

	if (__predict_false(expected != -1))
		panic("sleepq_wake: \"expected\" not supported");

	for (l = TAILQ_FIRST(sq); l; l = l_next) {
		l_next = TAILQ_NEXT(l, l_sleepchain);
		if (l->l_wchan == wchan) {
			found = true;
			l->l_wchan = NULL;
			l->l_wmesg = NULL;
			TAILQ_REMOVE(sq, l, l_sleepchain);
		}
	}
	if (found)
		cv_broadcast(&sq_cv);

	mutex_spin_exit(mp);
	return NULL;
}

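/*
 * Forcibly remove an LWP from its sleep queue and wake it, optionally
 * releasing the LWP's lock afterwards.
 */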
void
sleepq_unsleep(struct lwp *l, bool cleanup)
{

	l->l_wchan = NULL;
	l->l_wmesg = NULL;
	TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain);
	cv_broadcast(&sq_cv);

	if (cleanup) {
		mutex_spin_exit(l->l_mutex);
	}
}

/*
 * The thread scheduler handles priorities, so no action is needed
 * here.  (Maybe do something if we're desperate?)
 */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}

void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}

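/* A generic sleep has no owning LWP, so there is nothing to return. */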
struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}

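/*
 * Unlock the LWP, setting it to be locked by "new" instead.  The
 * memory barrier ensures the lock change is globally visible before
 * the old lock is released.
 */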
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	membar_exit();
	l->l_mutex = new;
	mutex_spin_exit(old);
}