/*	$NetBSD: locks.c,v 1.78 2017/12/27 09:01:53 ozaki-r Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.78 2017/12/27 09:01:53 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif
/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_spin_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = NULL,
};
static lockops_t mutex_adaptive_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};
static lockops_t rw_lockops = {
	.lo_name = "rwlock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

#define ALLOCK(lock, ops)		\
    lockdebug_alloc(__func__, __LINE__, lock, ops,	\
	(uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)			\
    lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)		\
    lockdebug_wantlock(__func__, __LINE__, lock,	\
	(uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)		\
    lockdebug_locked(__func__, __LINE__, lock, NULL,	\
	(uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)		\
    lockdebug_unlocked(__func__, __LINE__, lock,	\
	(uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)		\
    lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b)	do {} while (0)
#define FREELOCK(a)	do {} while (0)
#define WANTLOCK(a, b)	do {} while (0)
#define LOCKED(a, b)	do {} while (0)
#define UNLOCKED(a, b)	do {} while (0)
#define BARRIER(a, b)	do {} while (0)
#endif

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial: we stay compatible
 * with the kernel ABI, since kernel lock objects are always at least
 * the size of a pointer regardless of machine architecture.  The
 * downside, of course, is a performance penalty.
 */
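
/*
 * Illustrative sketch (not from the original file): after mutex_init()
 * the caller's kmutex_t holds nothing but a pointer to the
 * heap-allocated hypervisor object, so
 *
 *	kmutex_t km;
 *	mutex_init(&km, MUTEX_DEFAULT, IPL_NONE);
 *	struct rumpuser_mtx *rumtx = RUMPMTX(&km);
 *
 * recovers the underlying rumpuser mutex that the wrappers below
 * operate on (see the RUMPMTX() macro).
 */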

#define RUMPMTX(mtx) (*(struct rumpuser_mtx *const*)(mtx))

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals.  The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex.  The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	if (isspin)
		ALLOCK(mtx, &mutex_spin_lockops);
	else
		ALLOCK(mtx, &mutex_adaptive_lockops);
}
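
/*
 * Illustrative examples of the mapping above (not from the original
 * file): mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE) yields an adaptive
 * rumpuser mutex, while mutex_init(&mtx, MUTEX_DEFAULT, IPL_VM) and
 * mutex_init(&mtx, MUTEX_SPIN, IPL_HIGH) both yield spin mutexes,
 * since only the soft interrupt levels are treated as adaptive.
 */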

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	if (!rumpuser_mutex_spin_p(RUMPMTX(mtx)))
		BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	KASSERT(rumpuser_mutex_spin_p(RUMPMTX(mtx)));
	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
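
/*
 * Releasing a spin mutex is no different from releasing an adaptive
 * one here: the rumpuser mutex itself knows which flavour it is, so
 * mutex_spin_exit() can simply be an alias for mutex_exit().
 */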
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	WANTLOCK(mtx, -1);
#endif
	return 1;
}

int
mutex_owned(const kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

lwp_t *
mutex_owner(const kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

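/*
 * For upgrade/downgrade the hypervisor performs the actual lock
 * transition; the UNLOCKED/WANTLOCK/LOCKED calls merely update the
 * LOCKDEBUG bookkeeping to reflect the hold changing between shared
 * and exclusive.
 */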
int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

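/*
 * Common wait path for the cv_wait*() variants: returns 0 on a normal
 * wakeup, EWOULDBLOCK if a timed wait expired, and EINTR if the
 * calling lwp is being torn down (LW_RUMP_QEXIT).
 */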
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here: someone might want the CPU in order
		 * to set a condition.  Otherwise we'll just loop
		 * forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
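		/*
		 * Convert ticks to a relative timespec.  For example,
		 * assuming hz == 100, ticks == 150 gives
		 * ts = { .tv_sec = 1, .tv_nsec = 500000000 }.
		 */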
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
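
/*
 * There is no separate signal-interruptible wait here (cf. cv_wait_sig()
 * above), so the _sig variant is simply an alias.
 */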
__strong_alias(cv_timedwait_sig,cv_timedwait);

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}