kern_condvar.c revision 1.3.4.2 1 1.3.4.2 yamt /* $NetBSD: kern_condvar.c,v 1.3.4.2 2007/02/26 09:11:04 yamt Exp $ */
2 1.3.4.2 yamt
3 1.3.4.2 yamt /*-
4 1.3.4.2 yamt * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
5 1.3.4.2 yamt * All rights reserved.
6 1.3.4.2 yamt *
7 1.3.4.2 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.3.4.2 yamt * by Andrew Doran.
9 1.3.4.2 yamt *
10 1.3.4.2 yamt * Redistribution and use in source and binary forms, with or without
11 1.3.4.2 yamt * modification, are permitted provided that the following conditions
12 1.3.4.2 yamt * are met:
13 1.3.4.2 yamt * 1. Redistributions of source code must retain the above copyright
14 1.3.4.2 yamt * notice, this list of conditions and the following disclaimer.
15 1.3.4.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
16 1.3.4.2 yamt * notice, this list of conditions and the following disclaimer in the
17 1.3.4.2 yamt * documentation and/or other materials provided with the distribution.
18 1.3.4.2 yamt * 3. All advertising materials mentioning features or use of this software
19 1.3.4.2 yamt * must display the following acknowledgement:
20 1.3.4.2 yamt * This product includes software developed by the NetBSD
21 1.3.4.2 yamt * Foundation, Inc. and its contributors.
22 1.3.4.2 yamt * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.3.4.2 yamt * contributors may be used to endorse or promote products derived
24 1.3.4.2 yamt * from this software without specific prior written permission.
25 1.3.4.2 yamt *
26 1.3.4.2 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.3.4.2 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.3.4.2 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.3.4.2 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.3.4.2 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.3.4.2 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.3.4.2 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.3.4.2 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.3.4.2 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.3.4.2 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.3.4.2 yamt * POSSIBILITY OF SUCH DAMAGE.
37 1.3.4.2 yamt */
38 1.3.4.2 yamt
39 1.3.4.2 yamt /*
40 1.3.4.2 yamt * Kernel condition variable implementation, modeled after those found in
41 1.3.4.2 yamt * Solaris, a description of which can be found in:
42 1.3.4.2 yamt *
43 1.3.4.2 yamt * Solaris Internals: Core Kernel Architecture, Jim Mauro and
44 1.3.4.2 yamt * Richard McDougall.
45 1.3.4.2 yamt */
46 1.3.4.2 yamt
47 1.3.4.2 yamt #include <sys/cdefs.h>
48 1.3.4.2 yamt __KERNEL_RCSID(0, "$NetBSD: kern_condvar.c,v 1.3.4.2 2007/02/26 09:11:04 yamt Exp $");
49 1.3.4.2 yamt
50 1.3.4.2 yamt #include <sys/param.h>
51 1.3.4.2 yamt #include <sys/proc.h>
52 1.3.4.2 yamt #include <sys/sched.h>
53 1.3.4.2 yamt #include <sys/systm.h>
54 1.3.4.2 yamt #include <sys/condvar.h>
55 1.3.4.2 yamt #include <sys/sleepq.h>
56 1.3.4.2 yamt
static void	cv_unsleep(struct lwp *);
static void	cv_changepri(struct lwp *, int);

/*
 * Sync object operations vector for condition variables.  Sleep queues
 * for CVs are kept sorted by priority (SOBJ_SLEEPQ_SORTED), with
 * cv_unsleep() handling early removal and cv_changepri() handling
 * priority adjustment of blocked LWPs.
 */
syncobj_t cv_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	cv_unsleep,
	cv_changepri,
};
65 1.3.4.2 yamt
66 1.3.4.2 yamt /*
67 1.3.4.2 yamt * cv_init:
68 1.3.4.2 yamt *
69 1.3.4.2 yamt * Initialize a condition variable for use.
70 1.3.4.2 yamt */
71 1.3.4.2 yamt void
72 1.3.4.2 yamt cv_init(kcondvar_t *cv, const char *wmesg)
73 1.3.4.2 yamt {
74 1.3.4.2 yamt
75 1.3.4.2 yamt KASSERT(wmesg != NULL);
76 1.3.4.2 yamt
77 1.3.4.2 yamt cv->cv_wmesg = wmesg;
78 1.3.4.2 yamt cv->cv_waiters = 0;
79 1.3.4.2 yamt }
80 1.3.4.2 yamt
81 1.3.4.2 yamt /*
82 1.3.4.2 yamt * cv_destroy:
83 1.3.4.2 yamt *
84 1.3.4.2 yamt * Tear down a condition variable.
85 1.3.4.2 yamt */
86 1.3.4.2 yamt void
87 1.3.4.2 yamt cv_destroy(kcondvar_t *cv)
88 1.3.4.2 yamt {
89 1.3.4.2 yamt
90 1.3.4.2 yamt #ifdef DIAGNOSTIC
91 1.3.4.2 yamt KASSERT(cv->cv_waiters == 0 && cv->cv_wmesg != NULL);
92 1.3.4.2 yamt cv->cv_wmesg = NULL;
93 1.3.4.2 yamt #endif
94 1.3.4.2 yamt }
95 1.3.4.2 yamt
/*
 * cv_enter:
 *
 *	Look up and lock the sleep queue corresponding to the given
 *	condition variable, and increment the number of waiters.
 *
 *	Called with the interlock (mtx) held.  The waiter count is
 *	incremented while both the sleep queue lock and the interlock
 *	are held, then the interlock is dropped; this ordering is what
 *	lets cv_signal()/cv_broadcast() trust a non-zero cv_waiters
 *	seen under their locks.  Returns with the sleep queue locked.
 */
static inline sleepq_t *
cv_enter(kcondvar_t *cv, kmutex_t *mtx, struct lwp *l)
{
	sleepq_t *sq;

	/* A destroyed CV has cv_wmesg poisoned to NULL (DIAGNOSTIC). */
	KASSERT(cv->cv_wmesg != NULL);

	sq = sleeptab_lookup(&sleeptab, cv);
	cv->cv_waiters++;
	sleepq_enter(sq, l);
	/* Safe to release the interlock only after joining the queue. */
	mutex_exit(mtx);

	return sq;
}
116 1.3.4.2 yamt
117 1.3.4.2 yamt /*
118 1.3.4.2 yamt * cv_unsleep:
119 1.3.4.2 yamt *
120 1.3.4.2 yamt * Remove an LWP from the condition variable and sleep queue. This
121 1.3.4.2 yamt * is called when the LWP has not been awoken normally but instead
122 1.3.4.2 yamt * interrupted: for example, when a signal is received. Must be
123 1.3.4.2 yamt * called with the LWP locked, and must return it unlocked.
124 1.3.4.2 yamt */
125 1.3.4.2 yamt static void
126 1.3.4.2 yamt cv_unsleep(struct lwp *l)
127 1.3.4.2 yamt {
128 1.3.4.2 yamt uintptr_t addr;
129 1.3.4.2 yamt
130 1.3.4.2 yamt KASSERT(l->l_wchan != NULL);
131 1.3.4.2 yamt LOCK_ASSERT(lwp_locked(l, l->l_sleepq->sq_mutex));
132 1.3.4.2 yamt
133 1.3.4.2 yamt addr = (uintptr_t)l->l_wchan;
134 1.3.4.2 yamt ((kcondvar_t *)addr)->cv_waiters--;
135 1.3.4.2 yamt
136 1.3.4.2 yamt sleepq_unsleep(l);
137 1.3.4.2 yamt }
138 1.3.4.2 yamt
/*
 * cv_changepri:
 *
 *	Adjust the real (user) priority of an LWP blocked on a CV.
 *	Called with the LWP locked (on the sleep queue's mutex).
 */
static void
cv_changepri(struct lwp *l, int pri)
{
	sleepq_t *sq = l->l_sleepq;
	int opri;

	KASSERT(lwp_locked(l, sq->sq_mutex));

	opri = l->l_priority;
	l->l_usrpri = pri;
	/* While asleep the LWP runs at a kernel priority derived from usrpri. */
	l->l_priority = sched_kpri(l);

	if (l->l_priority != opri) {
		/*
		 * Effective priority changed: re-insert to keep the
		 * queue sorted (SOBJ_SLEEPQ_SORTED).
		 *
		 * NOTE(review): the insert passes the caller-supplied
		 * user priority `pri`, not the recomputed
		 * l->l_priority used for the comparison above --
		 * confirm which value sleepq_insert() expects as the
		 * sort key.
		 */
		TAILQ_REMOVE(&sq->sq_queue, l, l_sleepchain);
		sleepq_insert(sq, l, pri, l->l_syncobj);
	}
}
161 1.3.4.2 yamt
162 1.3.4.2 yamt /*
163 1.3.4.2 yamt * cv_wait:
164 1.3.4.2 yamt *
165 1.3.4.2 yamt * Wait non-interruptably on a condition variable until awoken.
166 1.3.4.2 yamt */
167 1.3.4.2 yamt void
168 1.3.4.2 yamt cv_wait(kcondvar_t *cv, kmutex_t *mtx)
169 1.3.4.2 yamt {
170 1.3.4.2 yamt struct lwp *l = curlwp;
171 1.3.4.2 yamt sleepq_t *sq;
172 1.3.4.2 yamt
173 1.3.4.2 yamt LOCK_ASSERT(mutex_owned(mtx));
174 1.3.4.2 yamt
175 1.3.4.2 yamt if (sleepq_dontsleep(l)) {
176 1.3.4.2 yamt (void)sleepq_abort(mtx, 0);
177 1.3.4.2 yamt return;
178 1.3.4.2 yamt }
179 1.3.4.2 yamt
180 1.3.4.2 yamt sq = cv_enter(cv, mtx, l);
181 1.3.4.2 yamt sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 0, &cv_syncobj);
182 1.3.4.2 yamt (void)sleepq_unblock(0, 0);
183 1.3.4.2 yamt mutex_enter(mtx);
184 1.3.4.2 yamt }
185 1.3.4.2 yamt
186 1.3.4.2 yamt /*
187 1.3.4.2 yamt * cv_wait_sig:
188 1.3.4.2 yamt *
189 1.3.4.2 yamt * Wait on a condition variable until a awoken or a signal is received.
190 1.3.4.2 yamt * Will also return early if the process is exiting. Returns zero if
191 1.3.4.2 yamt * awoken normallly, ERESTART if a signal was received and the system
192 1.3.4.2 yamt * call is restartable, or EINTR otherwise.
193 1.3.4.2 yamt */
194 1.3.4.2 yamt int
195 1.3.4.2 yamt cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
196 1.3.4.2 yamt {
197 1.3.4.2 yamt struct lwp *l = curlwp;
198 1.3.4.2 yamt sleepq_t *sq;
199 1.3.4.2 yamt int error;
200 1.3.4.2 yamt
201 1.3.4.2 yamt LOCK_ASSERT(mutex_owned(mtx));
202 1.3.4.2 yamt
203 1.3.4.2 yamt if (sleepq_dontsleep(l))
204 1.3.4.2 yamt return sleepq_abort(mtx, 0);
205 1.3.4.2 yamt
206 1.3.4.2 yamt sq = cv_enter(cv, mtx, l);
207 1.3.4.2 yamt sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, 0, 1, &cv_syncobj);
208 1.3.4.2 yamt error = sleepq_unblock(0, 1);
209 1.3.4.2 yamt mutex_enter(mtx);
210 1.3.4.2 yamt
211 1.3.4.2 yamt return error;
212 1.3.4.2 yamt }
213 1.3.4.2 yamt
214 1.3.4.2 yamt /*
215 1.3.4.2 yamt * cv_timedwait:
216 1.3.4.2 yamt *
217 1.3.4.2 yamt * Wait on a condition variable until awoken or the specified timeout
218 1.3.4.2 yamt * expires. Returns zero if awoken normally or EWOULDBLOCK if the
219 1.3.4.2 yamt * timeout expired.
220 1.3.4.2 yamt */
221 1.3.4.2 yamt int
222 1.3.4.2 yamt cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int timo)
223 1.3.4.2 yamt {
224 1.3.4.2 yamt struct lwp *l = curlwp;
225 1.3.4.2 yamt sleepq_t *sq;
226 1.3.4.2 yamt int error;
227 1.3.4.2 yamt
228 1.3.4.2 yamt LOCK_ASSERT(mutex_owned(mtx));
229 1.3.4.2 yamt
230 1.3.4.2 yamt if (sleepq_dontsleep(l))
231 1.3.4.2 yamt return sleepq_abort(mtx, 0);
232 1.3.4.2 yamt
233 1.3.4.2 yamt sq = cv_enter(cv, mtx, l);
234 1.3.4.2 yamt sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 0, &cv_syncobj);
235 1.3.4.2 yamt error = sleepq_unblock(timo, 0);
236 1.3.4.2 yamt mutex_enter(mtx);
237 1.3.4.2 yamt
238 1.3.4.2 yamt return error;
239 1.3.4.2 yamt }
240 1.3.4.2 yamt
241 1.3.4.2 yamt /*
242 1.3.4.2 yamt * cv_timedwait_sig:
243 1.3.4.2 yamt *
244 1.3.4.2 yamt * Wait on a condition variable until a timeout expires, awoken or a
245 1.3.4.2 yamt * signal is received. Will also return early if the process is
246 1.3.4.2 yamt * exiting. Returns zero if awoken normallly, EWOULDBLOCK if the
247 1.3.4.2 yamt * timeout expires, ERESTART if a signal was received and the system
248 1.3.4.2 yamt * call is restartable, or EINTR otherwise.
249 1.3.4.2 yamt */
250 1.3.4.2 yamt int
251 1.3.4.2 yamt cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int timo)
252 1.3.4.2 yamt {
253 1.3.4.2 yamt struct lwp *l = curlwp;
254 1.3.4.2 yamt sleepq_t *sq;
255 1.3.4.2 yamt int error;
256 1.3.4.2 yamt
257 1.3.4.2 yamt LOCK_ASSERT(mutex_owned(mtx));
258 1.3.4.2 yamt
259 1.3.4.2 yamt if (sleepq_dontsleep(l))
260 1.3.4.2 yamt return sleepq_abort(mtx, 0);
261 1.3.4.2 yamt
262 1.3.4.2 yamt sq = cv_enter(cv, mtx, l);
263 1.3.4.2 yamt sleepq_block(sq, sched_kpri(l), cv, cv->cv_wmesg, timo, 1, &cv_syncobj);
264 1.3.4.2 yamt error = sleepq_unblock(timo, 1);
265 1.3.4.2 yamt mutex_enter(mtx);
266 1.3.4.2 yamt
267 1.3.4.2 yamt return error;
268 1.3.4.2 yamt }
269 1.3.4.2 yamt
/*
 * cv_signal:
 *
 *	Wake the highest priority LWP waiting on a condition variable.
 *	Must be called with the interlocking mutex held.
 */
void
cv_signal(kcondvar_t *cv)
{
	sleepq_t *sq;

	/* Fast path: unlocked read; see the staleness note below. */
	if (cv->cv_waiters == 0)
		return;

	/*
	 * cv->cv_waiters may be stale and have dropped to zero, but
	 * while holding the interlock (the mutex passed to cv_wait()
	 * and similar) we will see non-zero values when it matters.
	 */

	sq = sleeptab_lookup(&sleeptab, cv);
	/* Re-check under the sleep queue lock before waking. */
	if (cv->cv_waiters != 0) {
		cv->cv_waiters--;
		/*
		 * NOTE(review): presumably sleepq_wake() drops the
		 * queue lock (the else arm unlocks explicitly) --
		 * confirm against kern_sleepq.c.
		 */
		sleepq_wake(sq, cv, 1);
	} else
		sleepq_unlock(sq);
}
297 1.3.4.2 yamt
298 1.3.4.2 yamt /*
299 1.3.4.2 yamt * cv_broadcast:
300 1.3.4.2 yamt *
301 1.3.4.2 yamt * Wake all LWPs waiting on a condition variable. Must be called
302 1.3.4.2 yamt * with the interlocking mutex held.
303 1.3.4.2 yamt */
304 1.3.4.2 yamt void
305 1.3.4.2 yamt cv_broadcast(kcondvar_t *cv)
306 1.3.4.2 yamt {
307 1.3.4.2 yamt sleepq_t *sq;
308 1.3.4.2 yamt u_int cnt;
309 1.3.4.2 yamt
310 1.3.4.2 yamt if (cv->cv_waiters == 0)
311 1.3.4.2 yamt return;
312 1.3.4.2 yamt
313 1.3.4.2 yamt sq = sleeptab_lookup(&sleeptab, cv);
314 1.3.4.2 yamt if ((cnt = cv->cv_waiters) != 0) {
315 1.3.4.2 yamt cv->cv_waiters = 0;
316 1.3.4.2 yamt sleepq_wake(sq, cv, cnt);
317 1.3.4.2 yamt } else
318 1.3.4.2 yamt sleepq_unlock(sq);
319 1.3.4.2 yamt }
320 1.3.4.2 yamt
321 1.3.4.2 yamt /*
322 1.3.4.2 yamt * cv_wakeup:
323 1.3.4.2 yamt *
324 1.3.4.2 yamt * Wake all LWPs waiting on a condition variable. The interlock
325 1.3.4.2 yamt * need not be held, but it is the caller's responsibility to
326 1.3.4.2 yamt * ensure correct synchronization.
327 1.3.4.2 yamt */
328 1.3.4.2 yamt void
329 1.3.4.2 yamt cv_wakeup(kcondvar_t *cv)
330 1.3.4.2 yamt {
331 1.3.4.2 yamt sleepq_t *sq;
332 1.3.4.2 yamt u_int cnt;
333 1.3.4.2 yamt
334 1.3.4.2 yamt sq = sleeptab_lookup(&sleeptab, cv);
335 1.3.4.2 yamt if ((cnt = cv->cv_waiters) != 0) {
336 1.3.4.2 yamt cv->cv_waiters = 0;
337 1.3.4.2 yamt sleepq_wake(sq, cv, cnt);
338 1.3.4.2 yamt } else
339 1.3.4.2 yamt sleepq_unlock(sq);
340 1.3.4.2 yamt }
341 1.3.4.2 yamt
/*
 * cv_has_waiters:
 *
 *	For diagnostic assertions: return non-zero if a condition
 *	variable has waiters.  The unlocked read may be stale; callers
 *	must not rely on it for anything but diagnostics.
 */
int
cv_has_waiters(kcondvar_t *cv)
{

	/* No need to interlock here */
	return (int)cv->cv_waiters;
}
355