/*	$NetBSD: locks.c,v 1.61 2013/05/02 20:33:54 pooka Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.61 2013/05/02 20:33:54 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar, try)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b, c)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif

/*
 * We map locks to pthread routines.  The difference between the kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the lock object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It also keeps us compatible with the kernel
 * ABI, since kernel lock objects on every machine architecture are
 * always at least the size of a pointer.  The downside, of course,
 * is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
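
/*
 * Illustration only: a hypothetical hypervisor-side sketch (not the
 * actual rumpuser implementation) of how the indirection works.  The
 * hypervisor allocates its pthread-backed object on the heap and
 * stores a pointer to it in the storage provided by the kernel lock
 * object, which is why sizeof(kmutex_t) only needs to hold a pointer:
 *
 *	void
 *	rumpuser_mutex_init(struct rumpuser_mtx **mtxp, int flags)
 *	{
 *		struct rumpuser_mtx *mtx;
 *
 *		mtx = malloc(sizeof(*mtx));		// size unknown to the kernel side
 *		pthread_mutex_init(&mtx->pthmtx, NULL);	// "pthmtx" is a made-up member name
 *		// (flags handling omitted in this sketch)
 *		*mtxp = mtx;				// kernel object stores only this pointer
 *	}
 *
 * The wrappers below recover the heap object with the RUMPMTX() cast.
 */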

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	/* spin mutex support needs some cpu scheduler rework */
	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}
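
/*
 * Example (illustrative only; "sc_lock" is a hypothetical caller's lock):
 * mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE) yields an adaptive mutex
 * above, while initializing with a hardware interrupt level such as
 * IPL_VM falls into the spin case.
 */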

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, false, true);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER, false);
	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER, true);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	/*
	 * XXX HACK: there is no way to properly downgrade an rwlock
	 * here, so release the write lock and reacquire it as a reader.
	 */
	rw_exit(rw);
	rw_enter(rw, RW_READER);
	return;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_wrheld(RUMPRW(rw), &rv);
	return rv;
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_rdheld(RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPRW(rw), &rv);
	return rv;
}

/* condition variables ("curriculum vitaes") */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * Yield the CPU here: somebody else might want it in
		 * order to set the condition we would otherwise end
		 * up looping on forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If set, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove the "reference" to l->l_private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
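		/*
		 * Worked example (assuming hz = 100): ticks = 150 gives
		 * ts.tv_sec = 1 and ts.tv_nsec = 50 * 10000000 = 500000000,
		 * i.e. a timeout of 1.5 seconds.
		 */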
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}