/*	$NetBSD: locks.c,v 1.73.4.3 2017/05/02 03:19:22 pgoyette Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.73.4.3 2017/05/02 03:19:22 pgoyette Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
	"mutex",
	LOCKOPS_SLEEP,
	NULL
};
static lockops_t rw_lockops = {
	"rwlock",
	LOCKOPS_SLEEP,
	NULL
};

#define ALLOCK(lock, ops)						\
    lockdebug_alloc(__func__, __LINE__, lock, ops,			\
	(uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)							\
    lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)						\
    lockdebug_wantlock(__func__, __LINE__, lock,			\
	(uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)						\
    lockdebug_locked(__func__, __LINE__, lock, NULL,			\
	(uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)						\
    lockdebug_unlocked(__func__, __LINE__, lock,			\
	(uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)						\
    lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#define BARRIER(a, b)
#endif

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial: it keeps us
 * compatible with the kernel ABI, since kernel lock objects on
 * all machine architectures are at least the size of a pointer.
 * The downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
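
/*
 * Illustrative sketch of the indirection described above: after
 * mutex_init(), the first pointer-sized word of the caller-provided
 * kmutex_t holds the heap-allocated rumpuser mutex, so RUMPMTX() is a
 * plain dereference:
 *
 *	kmutex_t mtx;					// kernel-side storage
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);
 *	struct rumpuser_mtx *rmtx = RUMPMTX(&mtx);	// hypervisor object
 */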

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 * (A few classified examples follow this function.)
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	ALLOCK(mtx, &mutex_lockops);
}
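
/*
 * Examples (illustrative only) of how the conditionals above classify
 * some common initializations; IPL_VM stands in for any hard-interrupt
 * ipl here:
 *
 *	mutex_init(&mtx, MUTEX_SPIN, IPL_NONE);		// spin: explicit
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);	// adaptive: no intr use
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_SOFTNET);	// adaptive: soft intr
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_VM);	// spin: hard intr ipl
 */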

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	BARRIER(mtx, 1);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	WANTLOCK(mtx, -1);
#endif
	return 1;
}

int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}
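
/*
 * Lockdebug bookkeeping for upgrades: a successful rw_tryupgrade()
 * above records the shared hold as released (UNLOCKED with shar == 1)
 * and an exclusive hold as acquired (WANTLOCK/LOCKED with shar == 0);
 * rw_downgrade() below performs the mirror-image update.
 */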

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

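/*
 * Wait on cv, with mtx held on entry.  Returns 0 on a normal wakeup,
 * EWOULDBLOCK if the optional timeout ts expires first, and EINTR if
 * the lwp is being torn down (LW_RUMP_QEXIT).  The mutex is dropped
 * for the duration of the wait and is held again on return.
 */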
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove the "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait_sig without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
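
/*
 * Worked example of the tick-to-timespec conversion above, assuming
 * hz == 100 (a common but configuration-dependent value): ticks == 150
 * gives ts.tv_sec = 150 / 100 = 1 and ts.tv_nsec = (150 % 100) *
 * (1000000000 / 100) = 500000000, i.e. a 1.5 second timeout.
 */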

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}