/*	$NetBSD: locks_up.c,v 1.4.4.2 2010/08/17 06:48:01 uebayasi Exp $	*/

/*
 * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual uniprocessor rump kernel version of locks.  Since the entire
 * kernel is running on only one CPU in the system, there is no need
 * to perform slow cache-coherent MP locking operations.  This speeds
 * things up quite dramatically and is a good example of how two
 * disjoint kernels running simultaneously on an MP system can be
 * massively faster than a single kernel with fine-grained locking.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks_up.c,v 1.4.4.2 2010/08/17 06:48:01 uebayasi Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct upmtx {
	struct lwp *upm_owner;
	int upm_wanted;
	struct rumpuser_cv *upm_rucv;
};
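/*
 * The kmutex_t handed to us by the kernel is used purely as storage:
 * mutex_init() allocates the real lock object from the hypervisor and
 * stores a pointer to it in the kmutex_t (the CTASSERTs below check
 * that a pointer fits).  UPMTX() fetches that pointer back.
 */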
#define UPMTX(mtx) struct upmtx *upm = *(struct upmtx **)mtx

static inline void
checkncpu(void)
{

	if (__predict_false(ncpu != 1))
		panic("UP lock implementation requires RUMP_NCPU == 1");
}

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	struct upmtx *upm;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
	checkncpu();

	/*
	 * XXX: pool_cache would be nice, but not easily possible,
	 * as pool cache init wants to call mutex_init() ...
	 */
	upm = rump_hypermalloc(sizeof(*upm), 0, true, "mutex_init");
	memset(upm, 0, sizeof(*upm));
	rumpuser_cv_init(&upm->upm_rucv);
	memcpy(mtx, &upm, sizeof(void *));
}

void
mutex_destroy(kmutex_t *mtx)
{
	UPMTX(mtx);

	KASSERT(upm->upm_owner == NULL);
	KASSERT(upm->upm_wanted == 0);
	rumpuser_cv_destroy(upm->upm_rucv);
	rump_hyperfree(upm, sizeof(*upm));
}

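/*
 * Acquire a mutex.  The common case is handled by mutex_tryenter().
 * If the lock is held, register interest in upm_wanted and sleep on
 * the lock's condvar; rump_schedlock_cv_wait() releases the virtual
 * CPU while sleeping.  mutex_exit() signals the condvar whenever
 * upm_wanted is non-zero, so the loop eventually wins the lock.
 */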
void
mutex_enter(kmutex_t *mtx)
{
	UPMTX(mtx);

	/* fastpath? */
	if (mutex_tryenter(mtx))
		return;

	/*
	 * No?  bummer, do it the slow and painful way then.
	 */
	upm->upm_wanted++;
	while (!mutex_tryenter(mtx)) {
		rump_schedlock_cv_wait(upm->upm_rucv);
	}
	upm->upm_wanted--;

	KASSERT(upm->upm_wanted >= 0);
}

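/*
 * On a single virtual CPU there is nothing to spin against, so spin
 * mutexes are simply aliases for the adaptive ones.
 */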
void
mutex_spin_enter(kmutex_t *mtx)
{

	mutex_enter(mtx);
}

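/*
 * Try to acquire the mutex without blocking.  Since at most one LWP
 * runs on the single virtual CPU at a time, checking and setting the
 * owner needs no atomic operations.
 */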
int
mutex_tryenter(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_owner)
		return 0;

	upm->upm_owner = curlwp;
	return 1;
}

void
mutex_exit(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_wanted) {
		rumpuser_cv_signal(upm->upm_rucv); /* CPU is our interlock */
	}
	upm->upm_owner = NULL;
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{
	UPMTX(mtx);

	return upm->upm_owner == curlwp;
}

struct uprw {
	struct lwp *uprw_owner;
	int uprw_readers;
	uint16_t uprw_rwant;
	uint16_t uprw_wwant;
	struct rumpuser_cv *uprw_rucv_reader;
	struct rumpuser_cv *uprw_rucv_writer;
};

#define UPRW(rw) struct uprw *uprw = *(struct uprw **)rw

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{
	struct uprw *uprw;

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));
	checkncpu();

	uprw = rump_hypermalloc(sizeof(*uprw), 0, true, "rwinit");
	memset(uprw, 0, sizeof(*uprw));
	rumpuser_cv_init(&uprw->uprw_rucv_reader);
	rumpuser_cv_init(&uprw->uprw_rucv_writer);
	memcpy(rw, &uprw, sizeof(void *));
}

void
rw_destroy(krwlock_t *rw)
{
	UPRW(rw);

	rumpuser_cv_destroy(uprw->uprw_rucv_reader);
	rumpuser_cv_destroy(uprw->uprw_rucv_writer);
	rump_hyperfree(uprw, sizeof(*uprw));
}

/* take rwlock.  prefer writers over readers (see rw_tryenter and rw_exit) */
void
rw_enter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);
	struct rumpuser_cv *rucv;
	uint16_t *wp;

	if (rw_tryenter(rw, op))
		return;

	/* lagpath */
	if (op == RW_READER) {
		rucv = uprw->uprw_rucv_reader;
		wp = &uprw->uprw_rwant;
	} else {
		rucv = uprw->uprw_rucv_writer;
		wp = &uprw->uprw_wwant;
	}

	(*wp)++;
	while (!rw_tryenter(rw, op)) {
		rump_schedlock_cv_wait(rucv);
	}
	(*wp)--;
}

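/*
 * Admission policy: a reader gets in only if there is no writer and
 * no writer is waiting (uprw_wwant), which gives writers preference.
 * A writer gets in only if there are neither readers nor a writer.
 */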
int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);

	switch (op) {
	case RW_READER:
		if (uprw->uprw_owner == NULL && uprw->uprw_wwant == 0) {
			uprw->uprw_readers++;
			return 1;
		}
		break;
	case RW_WRITER:
		if (uprw->uprw_owner == NULL && uprw->uprw_readers == 0) {
			uprw->uprw_owner = curlwp;
			return 1;
		}
		break;
	}

	return 0;
}

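/*
 * Release the lock and wake up waiters, writers first (see the
 * comment above rw_enter).
 */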
void
rw_exit(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers > 0) {
		uprw->uprw_readers--;
	} else {
		KASSERT(uprw->uprw_owner == curlwp);
		uprw->uprw_owner = NULL;
	}

	if (uprw->uprw_wwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_writer);
	} else if (uprw->uprw_rwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_reader);
	}
}

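/*
 * Upgrade from a read hold to a write hold.  This can succeed only
 * when the caller is the sole reader and nobody holds the write side.
 */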
int
rw_tryupgrade(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers == 1 && uprw->uprw_owner == NULL) {
		uprw->uprw_readers = 0;
		uprw->uprw_owner = curlwp;
		return 1;
	} else {
		return 0;
	}
}

int
rw_write_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner == curlwp;
}

int
rw_read_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_readers > 0;
}

int
rw_lock_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner || uprw->uprw_readers;
}


/*
 * Condvars are almost the same as in the MP case except that we
 * use the scheduler mutex as the pthread interlock instead of the
 * mutex associated with the condvar.
 */

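/*
 * A kcondvar_t directly stores the pointer to the rumpuser condvar;
 * no separate wrapper structure is needed.
 */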
#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));
	checkncpu();

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);

	if (rump_threads == 0)
		panic("cv_wait without threads");
#endif

	/*
	 * NOTE: we must atomically release the *CPU* here, i.e.
	 * nothing between mutex_exit and entering rumpuser condwait
	 * may preempt us from the virtual CPU.
	 */
	mutex_exit(mtx);
	rump_schedlock_cv_wait(RUMPCV(cv));
	mutex_enter(mtx);
}

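/*
 * No signal handling here: behaves exactly like cv_wait() and always
 * reports success.
 */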
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	cv_wait(cv, mtx);
	return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tstick;

#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);
#endif

	/*
	 * XXX: this fetches rump kernel time, but rumpuser_cv_timedwait
	 * uses host time.
	 */
	nanotime(&ts);
	tstick.tv_sec = ticks / hz;
	tstick.tv_nsec = (ticks % hz) * (1000000000/hz);
	timespecadd(&ts, &tstick, &ts);
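	/*
	 * The above converts the relative tick count into an absolute
	 * timespec, e.g. with hz == 100 and ticks == 150 it adds
	 * 1 s + 500000000 ns to the current time.
	 */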

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		int rv;
		mutex_exit(mtx);
		rv = rump_schedlock_cv_timedwait(RUMPCV(cv), &ts);
		mutex_enter(mtx);
		if (rv)
			return EWOULDBLOCK;
		else
			return 0;
	}
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}