/*	$NetBSD: locks.c,v 1.51 2011/03/08 12:39:29 pooka Exp $	*/

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.51 2011/03/08 12:39:29 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug: if it is compiled in, it is always active.
 * Currently available only for mutexes and rwlocks.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
        "mutex",
        LOCKOPS_SLEEP,
        NULL
};
static lockops_t rw_lockops = {
        "rwlock",
        LOCKOPS_SLEEP,
        NULL
};

#define ALLOCK(lock, ops) \
        lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock) \
        lockdebug_free(lock)
#define WANTLOCK(lock, shar, try) \
        lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try)
#define LOCKED(lock, shar) \
        lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar) \
        lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b, c)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif

/*
 * We map kernel locks to pthread routines.  The difference between
 * the kernel and rumpuser routines is that the kernel uses static
 * storage for the lock, while rumpuser allocates the lock object
 * from the heap.  This indirection is necessary because we do not
 * know the size of pthread objects here.  It also keeps us
 * compatible with the kernel ABI, since kernel lock objects on
 * every architecture are at least the size of a pointer.  The
 * downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
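
/*
 * Illustrative sketch (not part of the implementation): the caller's
 * kmutex_t is treated as opaque storage whose first pointer-sized
 * word holds the heap-allocated rumpuser mutex, so RUMPMTX() is a
 * plain pointer dereference:
 *
 *	kmutex_t km;				// caller-provided storage
 *	mutex_init(&km, MUTEX_DEFAULT, IPL_NONE);
 *	// *(struct rumpuser_mtx **)&km now points to the heap object
 *	mutex_enter(&km);			// forwarded to rumpuser
 */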

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

        CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

        rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)mtx);
        ALLOCK(mtx, &mutex_lockops);
}

void
mutex_destroy(kmutex_t *mtx)
{

        FREELOCK(mtx);
        rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

        WANTLOCK(mtx, false, false);
        rumpuser_mutex_enter(RUMPMTX(mtx));
        LOCKED(mtx, false);
}
__strong_alias(mutex_spin_enter,mutex_enter);
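
/*
 * Spin mutexes are aliased to the sleeping implementation above: a
 * rump kernel runs as threads in a host process with no real
 * interrupt context, so there is nothing to spin against.
 */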

int
mutex_tryenter(kmutex_t *mtx)
{
        int rv;

        rv = rumpuser_mutex_tryenter(RUMPMTX(mtx));
        if (rv) {
                WANTLOCK(mtx, false, true);
                LOCKED(mtx, false);
        }
        return rv;
}

void
mutex_exit(kmutex_t *mtx)
{

        UNLOCKED(mtx, false);
        rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

        return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{

        return rumpuser_mutex_owner(RUMPMTX(mtx));
}
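
/*
 * Typical (hypothetical) use of mutex_owned(): asserting the locking
 * protocol in code that requires the caller to hold the mutex.  The
 * struct and function names below are invented for illustration:
 *
 *	void
 *	frob_locked(struct frob *f)
 *	{
 *
 *		KASSERT(mutex_owned(&f->f_lock));
 *		f->f_count++;			// safe: lock is held
 *	}
 */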

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

        CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

        rumpuser_rw_init((struct rumpuser_rw **)rw);
        ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

        FREELOCK(rw);
        rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

        WANTLOCK(rw, op == RW_READER, false);
        rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
        LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
        int rv;

        rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
        if (rv) {
                WANTLOCK(rw, op == RW_READER, true);
                LOCKED(rw, op == RW_READER);
        }
        return rv;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
        bool shared = !rw_write_held(rw);

        if (shared)
                KASSERT(rw_read_held(rw));
        UNLOCKED(rw, shared);
#endif
        rumpuser_rw_exit(RUMPRW(rw));
}

/* upgrading is not supported: always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

        return 0;
}
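
/*
 * Hypothetical caller sketch: since rw_tryupgrade() always fails
 * here, callers must be prepared to fall back to the usual
 * drop-and-relock dance, revalidating their state afterwards:
 *
 *	if (!rw_tryupgrade(rw)) {
 *		rw_exit(rw);
 *		rw_enter(rw, RW_WRITER);
 *		// state may have changed while the lock was dropped
 *	}
 */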

void
rw_downgrade(krwlock_t *rw)
{

#ifdef LOCKDEBUG
        KASSERT(rw_write_held(rw));
#endif
        /*
         * XXX HACK: there is no way to downgrade the host rwlock
         * directly, so drop the write lock and retake the lock as
         * a reader.  This is not atomic: another writer may slip
         * in between the two calls.
         */
        rw_exit(rw);
        rw_enter(rw, RW_READER);
}

int
rw_write_held(krwlock_t *rw)
{

        return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

        return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

        return rumpuser_rw_held(RUMPRW(rw));
}

/* condition variables */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

        CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

        rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

        rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
        struct lwp *l = curlwp;
        int rv;

        if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
                /*
                 * yield() here: someone else might want the CPU to
                 * set a condition.  Otherwise we would just loop
                 * forever.
                 */
                yield();
                return EINTR;
        }

        UNLOCKED(mtx, false);

        l->l_private = cv;
        rv = 0;
        if (ts) {
                if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
                    ts->tv_sec, ts->tv_nsec))
                        rv = EWOULDBLOCK;
        } else {
                rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
        }

        /*
         * Check for QEXIT.  If set, we need to wait here until we
         * are allowed to exit.
         */
        if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
                struct proc *p = l->l_proc;

                mutex_exit(mtx); /* drop and retake later */

                mutex_enter(p->p_lock);
                while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
                        /* avoid recursion */
                        rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
                            RUMPMTX(p->p_lock));
                }
                KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
                mutex_exit(p->p_lock);

                /* ok, we can exit and remove the "reference" to l->l_private */

                mutex_enter(mtx);
                rv = EINTR;
        }
        l->l_private = NULL;

        LOCKED(mtx, false);

        return rv;
}
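
/*
 * Hypothetical caller sketch (not part of this file): the standard
 * condition variable protocol.  The mutex must be held around both
 * the predicate check and the wait, and the predicate is rechecked
 * in a loop to guard against spurious wakeups; the names below are
 * invented for illustration:
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	// sc_ready is now true and sc_lock is still held
 *	mutex_exit(&sc->sc_lock);
 */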

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

        if (__predict_false(rump_threads == 0))
                panic("cv_wait without threads");
        (void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

        if (__predict_false(rump_threads == 0))
                panic("cv_wait_sig without threads");
        return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
        struct timespec ts, tick;
        extern int hz;
        int rv;

        if (ticks == 0) {
                rv = cv_wait_sig(cv, mtx);
        } else {
                /*
                 * XXX: this fetches rump kernel time, but
                 * rumpuser_cv_timedwait uses host time.
                 */
                nanotime(&ts);
                tick.tv_sec = ticks / hz;
                tick.tv_nsec = (ticks % hz) * (1000000000/hz);
                timespecadd(&ts, &tick, &ts);

                rv = docvwait(cv, mtx, &ts);
        }

        return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
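
/*
 * Worked example of the tick conversion above (values assumed for
 * illustration): with hz = 100, cv_timedwait(cv, mtx, 250) yields
 * tick.tv_sec = 250 / 100 = 2 and
 * tick.tv_nsec = (250 % 100) * (1000000000 / 100) = 500000000,
 * i.e. 0.5 s, for a total timeout 2.5 seconds past the current
 * time.  A ticks value of 0 means no timeout: the call waits
 * indefinitely via cv_wait_sig().
 */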

void
cv_signal(kcondvar_t *cv)
{

        rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

        rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

        return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

        return RUMPCV(cv) != NULL;
}