/*	$NetBSD: kern_condvar.c,v 1.2 2007/02/09 21:55:30 ad Exp $	*/
2
3 /*-
4 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Kernel condition variable implementation, modeled after those found in
41 * Solaris, a description of which can be found in:
42 *
43 * Solaris Internals: Core Kernel Architecture, Jim Mauro and
44 * Richard McDougall.
45 */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.2 2007/02/09 21:55:30 ad Exp $");
49
50 #include <sys/param.h>
51 #include <sys/proc.h>
52 #include <sys/sched.h>
53 #include <sys/systm.h>
54 #include <sys/condvar.h>
55 #include <sys/sleepq.h>
56
static void	cv_unsleep(struct lwp *);
static void	cv_changepri(struct lwp *, int);

/*
 * Sync object operations vector for condition variables.  CV sleep
 * queues are kept sorted (SOBJ_SLEEPQ_SORTED); cv_unsleep() handles
 * early removal of a blocked LWP and cv_changepri() re-sorts an LWP
 * whose priority changes while blocked.
 */
syncobj_t cv_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	cv_unsleep,
	cv_changepri,
};
65
66 /*
67 * cv_init:
68 *
69 * Initialize a condition variable for use.
70 */
71 void
72 cv_init(kcondvar_t *cv, const char *wmesg)
73 {
74
75 KASSERT(wmesg != NULL);
76
77 cv->cv_wmesg = wmesg;
78 cv->cv_waiters = 0;
79 }
80
/*
 * cv_destroy:
 *
 *	Tear down a condition variable.
 */
void
cv_destroy(kcondvar_t *cv)
{

#ifdef DIAGNOSTIC
	/*
	 * Catch destruction of a CV that still has waiters or was never
	 * initialized.  Clearing cv_wmesg makes any later use of the
	 * destroyed CV trip the KASSERT in cv_enter().  The #ifdef is
	 * needed for the store, not the KASSERT.
	 */
	KASSERT(cv->cv_waiters == 0 && cv->cv_wmesg != NULL);
	cv->cv_wmesg = NULL;
#endif
}
95
/*
 * cv_enter:
 *
 *	Look up and lock the sleep queue corresponding to the given
 *	condition variable, and increment the number of waiters.
 *	Returns with the sleep queue locked; drops the caller's
 *	interlocking mutex.
 */
static inline sleepq_t *
cv_enter(kcondvar_t *cv, kmutex_t *mtx, struct lwp *l)
{
	sleepq_t *sq;

	/* cv_wmesg is NULL only for a destroyed CV (see cv_destroy()). */
	KASSERT(cv->cv_wmesg != NULL);

	/*
	 * Order matters: sleeptab_lookup() returns the queue locked, and
	 * cv_waiters is bumped while both the queue lock and the caller's
	 * interlock are still held, so cv_signal()/cv_broadcast() (which
	 * run under the interlock) cannot miss this waiter.  Only then is
	 * the interlock released.
	 */
	sq = sleeptab_lookup(&sleeptab, cv);
	cv->cv_waiters++;
	sleepq_enter(sq, l);
	mutex_exit(mtx);

	return sq;
}
116
117 /*
118 * cv_unsleep:
119 *
120 * Remove an LWP from the condition variable and sleep queue. This
121 * is called when the LWP has not been awoken normally but instead
122 * interrupted: for example, when a signal is received. Must be
123 * called with the LWP locked, and must return it unlocked.
124 */
125 static void
126 cv_unsleep(struct lwp *l)
127 {
128 uintptr_t addr;
129
130 KASSERT(l->l_wchan != NULL);
131 LOCK_ASSERT(lwp_locked(l, l->l_sleepq->sq_mutex));
132
133 addr = (uintptr_t)l->l_wchan;
134 ((kcondvar_t *)addr)->cv_waiters--;
135
136 sleepq_unsleep(l);
137 }
138
/*
 * cv_changepri:
 *
 *	Adjust the real (user) priority of an LWP blocked on a CV.
 */
static void
cv_changepri(struct lwp *l, int pri)
{
	sleepq_t *sq = l->l_sleepq;
	int opri;

	KASSERT(lwp_locked(l, sq->sq_mutex));

	/* Remember the previous effective priority for comparison below. */
	opri = l->l_priority;
	l->l_usrpri = pri;
	l->l_priority = sched_kpri(l);

	/*
	 * If the effective priority changed, re-sort the LWP within the
	 * (priority-sorted) sleep queue.
	 *
	 * NOTE(review): sleepq_insert() is handed the new *user* priority
	 * 'pri', while l_priority now holds sched_kpri(l).  If the queue
	 * is ordered on l_priority, inserting on 'pri' could misplace the
	 * LWP -- confirm against sleepq_insert()'s contract.
	 */
	if (l->l_priority != opri) {
		TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);
		sleepq_insert(sq, l, pri, l->l_syncobj);
	}
}
161
162 /*
163 * cv_wait:
164 *
165 * Wait non-interruptably on a condition variable until awoken.
166 */
167 void
168 cv_wait(kcondvar_t *cv, kmutex_t *mtx)
169 {
170 struct lwp *l = curlwp;
171 sleepq_t *sq;
172
173 LOCK_ASSERT(mutex_owned(mtx));
174
175 if (sleepq_dontsleep(l)) {
176 (void)sleepq_abort(mtx, 0);
177 return;
178 }
179
180 sq = cv_enter(cv, mtx, l);
181 sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 0,
182 &cv_syncobj);
183 (void)sleepq_unblock(0, 0);
184 mutex_enter(mtx);
185 }
186
187 /*
188 * cv_wait_sig:
189 *
190 * Wait on a condition variable until a awoken or a signal is received.
191 * Will also return early if the process is exiting. Returns zero if
192 * awoken normallly, ERESTART if a signal was received and the system
193 * call is restartable, or EINTR otherwise.
194 */
195 int
196 cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
197 {
198 struct lwp *l = curlwp;
199 sleepq_t *sq;
200 int error;
201
202 LOCK_ASSERT(mutex_owned(mtx));
203
204 if (sleepq_dontsleep(l))
205 return sleepq_abort(mtx, 0);
206
207 sq = cv_enter(cv, mtx, l);
208 sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 1,
209 &cv_syncobj);
210 error = sleepq_unblock(0, 1);
211 mutex_enter(mtx);
212
213 return error;
214 }
215
216 /*
217 * cv_timedwait:
218 *
219 * Wait on a condition variable until awoken or the specified timeout
220 * expires. Returns zero if awoken normally or EWOULDBLOCK if the
221 * timeout expired.
222 */
223 int
224 cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
225 {
226 struct lwp *l = curlwp;
227 sleepq_t *sq;
228 int error;
229
230 LOCK_ASSERT(mutex_owned(mtx));
231
232 if (sleepq_dontsleep(l))
233 return sleepq_abort(mtx, 0);
234
235 sq = cv_enter(cv, mtx, l);
236 sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 0,
237 &cv_syncobj);
238 error = sleepq_unblock(timo, 0);
239 mutex_enter(mtx);
240
241 return error;
242 }
243
244 /*
245 * cv_timedwait_sig:
246 *
247 * Wait on a condition variable until a timeout expires, awoken or a
248 * signal is received. Will also return early if the process is
249 * exiting. Returns zero if awoken normallly, EWOULDBLOCK if the
250 * timeout expires, ERESTART if a signal was received and the system
251 * call is restartable, or EINTR otherwise.
252 */
253 int
254 cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
255 {
256 struct lwp *l = curlwp;
257 sleepq_t *sq;
258 int error;
259
260 LOCK_ASSERT(mutex_owned(mtx));
261
262 if (sleepq_dontsleep(l))
263 return sleepq_abort(mtx, 0);
264
265 sq = cv_enter(cv, mtx, l);
266 sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 1,
267 &cv_syncobj);
268 error = sleepq_unblock(timo, 1);
269 mutex_enter(mtx);
270
271 return error;
272 }
273
274 /*
275 * cv_signal:
276 *
277 * Wake the highest priority LWP waiting on a condition variable.
278 * Must be called with the interlocking mutex held.
279 */
280 void
281 cv_signal(kcondvar_t *cv)
282 {
283 sleepq_t *sq;
284
285 if (cv->cv_waiters == 0)
286 return;
287
288 /*
289 * cv->cv_waiters may be stale and have dropped to zero, but
290 * while holding the interlock (the mutex passed to cv_wait()
291 * and similar) we will see non-zero values when it matters.
292 */
293
294 sq = sleeptab_lookup(&sleeptab, cv);
295 if (cv->cv_waiters != 0) {
296 cv->cv_waiters--;
297 sleepq_wake(sq, cv, 1);
298 } else
299 sleepq_unlock(sq);
300 }
301
302 /*
303 * cv_broadcast:
304 *
305 * Wake all LWPs waiting on a condition variable. Must be called
306 * with the interlocking mutex held.
307 */
308 void
309 cv_broadcast(kcondvar_t *cv)
310 {
311 sleepq_t *sq;
312 u_int cnt;
313
314 if (cv->cv_waiters == 0)
315 return;
316
317 sq = sleeptab_lookup(&sleeptab, cv);
318 if ((cnt = cv->cv_waiters) != 0) {
319 cv->cv_waiters = 0;
320 sleepq_wake(sq, cv, cnt);
321 } else
322 sleepq_unlock(sq);
323 }
324
325 /*
326 * cv_wakeup:
327 *
328 * Wake all LWPs waiting on a condition variable. The interlock
329 * need not be held, but it is the caller's responsibility to
330 * ensure correct synchronization.
331 */
332 void
333 cv_wakeup(kcondvar_t *cv)
334 {
335 sleepq_t *sq;
336 u_int cnt;
337
338 sq = sleeptab_lookup(&sleeptab, cv);
339 if ((cnt = cv->cv_waiters) != 0) {
340 cv->cv_waiters = 0;
341 sleepq_wake(sq, cv, cnt);
342 } else
343 sleepq_unlock(sq);
344 }
345
346 /*
347 * cv_has_waiters:
348 *
349 * For diagnostic assertions: return non-zero if a condition
350 * variable has waiters.
351 */
352 int
353 cv_has_waiters(kcondvar_t *cv)
354 {
355
356 /* No need to interlock here */
357 return (int)cv->cv_waiters;
358 }
359