/*	$NetBSD: locks.c,v 1.54.4.1 2012/04/17 00:08:49 yamt Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.54.4.1 2012/04/17 00:08:49 yamt Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(lock)
#define WANTLOCK(lock, shar, try)	\
    lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try)
#define LOCKED(lock, shar)		\
    lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b, c)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#endif

/*
 * We map kernel locks to rumpuser (pthread) routines.  The difference
 * between the kernel and rumpuser routines is that while the kernel
 * uses static storage, rumpuser allocates the object from the heap.
 * This indirection is necessary because we don't know the size of
 * pthread objects here.  It also keeps us compatible with the kernel
 * ABI, since kernel lock objects are always at least the size of a
 * pointer regardless of machine architecture.  The downside, of
 * course, is a performance penalty from the extra dereference.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
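
/*
 * Illustrative sketch, not part of this file: under the scheme above,
 * the rumpuser side (implemented in the rumpuser library; the
 * pthread-backed version and the pthmtx member shown here are assumed
 * for illustration) allocates the real lock on the heap and hands back
 * a pointer, which fits in the pointer-sized kmutex_t storage:
 *
 *	void
 *	rumpuser_mutex_init_kmutex(struct rumpuser_mtx **mtxp)
 *	{
 *		struct rumpuser_mtx *mtx;
 *
 *		mtx = malloc(sizeof(*mtx));
 *		pthread_mutex_init(&mtx->pthmtx, NULL);
 *		*mtxp = mtx;
 *	}
 *
 * RUMPMTX() then recovers the heap pointer from the kmutex_t storage.
 */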

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)mtx);
	ALLOCK(mtx, &mutex_lockops);
}

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
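/*
 * There is nothing to usefully spin on in a rump kernel, so spin
 * mutexes are simply aliased to the adaptive routines.
 */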
__strong_alias(mutex_spin_enter,mutex_enter);

int
mutex_tryenter(kmutex_t *mtx)
{
	int rv;

	rv = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (rv) {
		WANTLOCK(mtx, false, true);
		LOCKED(mtx, false);
	}
	return rv;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{

	return rumpuser_mutex_owner(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER, false);
	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int rv;

	rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
	if (rv) {
		WANTLOCK(rw, op == RW_READER, true);
		LOCKED(rw, op == RW_READER);
	}
	return rv;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	/*
	 * XXX HACK: we have no way to properly downgrade a rump
	 * rwlock, so drop the write lock and reacquire as a reader.
	 */
	rw_exit(rw);
	rw_enter(rw, RW_READER);
}
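
/*
 * Note that unlike a true downgrade, the exit-and-reenter above is not
 * atomic: another writer may acquire and release the lock between
 * rw_exit() and rw_enter(), so callers cannot assume the protected
 * state is unchanged across rw_downgrade().
 */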

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* condition variables */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

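/*
 * Wait on cv with mtx held, optionally until the absolute time *ts.
 * Returns 0 on a normal wakeup, EWOULDBLOCK if the timeout expired,
 * and EINTR if the lwp is being torn down (LW_RUMP_QEXIT), in which
 * case we also park here until the process is allowed to exit.
 */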
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here: someone might want the cpu to
		 * set a condition.  otherwise we'll just loop
		 * forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->l_private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		/*
		 * XXX: this fetches rump kernel time, but
		 * rumpuser_cv_timedwait uses host time.
		 */
		nanotime(&ts);
		tick.tv_sec = ticks / hz;
		tick.tv_nsec = (ticks % hz) * (1000000000/hz);
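		/*
		 * Worked example of the conversion above: with hz = 100
		 * (10ms ticks) and ticks = 150, tick.tv_sec = 150 / 100 = 1
		 * and tick.tv_nsec = 50 * 10000000 = 500000000, i.e. a
		 * relative timeout of 1.5 seconds added to the current time.
		 */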
		timespecadd(&ts, &tick, &ts);

		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
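
/*
 * Illustrative only: the canonical pattern the cv routines above
 * support (names like sc_mtx/sc_cv/sc_ready are hypothetical):
 *
 *	mutex_enter(&sc->sc_mtx);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_mtx);
 *	mutex_exit(&sc->sc_mtx);
 *
 * cv_wait() atomically drops the mutex while sleeping and reacquires
 * it before returning, which is why the condition is rechecked in a
 * loop.
 */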