/*	$NetBSD: locks.c,v 1.56 2013/04/27 13:59:46 pooka Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.56 2013/04/27 13:59:46 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)	\
	lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)		\
	lockdebug_free(lock)
#define WANTLOCK(lock, shar, try)	\
	lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try)
#define LOCKED(lock, shar)	\
	lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)	\
	lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b, c)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif


/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since it keeps us
 * compatible with the kernel ABI: regardless of machine architecture,
 * all kernel lock objects are at least the size of a pointer.  The
 * downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

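/*
 * Illustrative sketch (not compiled): the opaque kmutex_t storage is
 * reused to hold a single pointer to the heap-allocated rumpuser object.
 * Conceptually:
 *
 *	kmutex_t km;
 *	rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)&km, isspin);
 *		-- the hypercall writes a heap pointer into the start of km
 *	RUMPMTX(&km)
 *		-- reads that pointer back for the rumpuser_mutex_*() calls
 */
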
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int isspin;

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)mtx, isspin);
	ALLOCK(mtx, &mutex_lockops);
}
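
/*
 * Example of the selection above (illustrative values only):
 *
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);	-> adaptive mutex
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_VM);	-> spin mutex
 *	mutex_init(&mtx, MUTEX_SPIN, IPL_SCHED);	-> spin mutex
 *
 * i.e. only MUTEX_SPIN or a non-soft interrupt ipl yields a mutex for
 * which the hypervisor will not release the rump kernel CPU context.
 */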

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int rv;

	rv = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (rv) {
		WANTLOCK(mtx, false, true);
		LOCKED(mtx, false);
	}
	return rv;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{

	return rumpuser_mutex_owner(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER, false);
	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int rv;

	rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
	if (rv) {
		WANTLOCK(rw, op == RW_READER, true);
		LOCKED(rw, op == RW_READER);
	}
	return rv;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	/*
	 * XXX HACK: we cannot downgrade the rw lock atomically through
	 * rumpuser, so release it and reacquire it as a reader.  Note
	 * that another writer may grab the lock in between.
	 */
	rw_exit(rw);
	rw_enter(rw, RW_READER);
	return;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

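/*
 * Wait on the condition variable.  If "ts" is non-NULL, it is an
 * absolute timespec at which the wait times out.  Returns 0 when woken
 * normally, EWOULDBLOCK on timeout and EINTR if the lwp must exit
 * (LW_RUMP_QEXIT).
 */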
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here: someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->l_private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		/*
		 * XXX: this fetches rump kernel time, but
		 * rumpuser_cv_timedwait uses host time.
		 */
		nanotime(&ts);
		tick.tv_sec = ticks / hz;
		tick.tv_nsec = (ticks % hz) * (1000000000/hz);
		timespecadd(&ts, &tick, &ts);

		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
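
/*
 * For illustration: with e.g. hz = 100, cv_timedwait(cv, mtx, 150)
 * above computes tick = { 1, 500000000 }, i.e. a deadline 1.5 seconds
 * past the current rump kernel time.
 */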

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
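
/*
 * For reference, the primitives above back the usual wait-loop pattern
 * (sketch, not code from this file):
 *
 *	mutex_enter(&mtx);
 *	while (!condition)
 *		cv_wait(&cv, &mtx);
 *	...
 *	mutex_exit(&mtx);
 *
 * with the waker holding the same mutex while setting the condition
 * and calling cv_signal()/cv_broadcast().
 */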