/*	$NetBSD: locks.c,v 1.79 2017/12/27 09:03:22 ozaki-r Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.79 2017/12/27 09:03:22 ozaki-r Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug. If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_spin_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = NULL,
};
static lockops_t mutex_adaptive_lockops = {
	.lo_name = "mutex",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};
static lockops_t rw_lockops = {
	.lo_name = "rwlock",
	.lo_type = LOCKOPS_SLEEP,
	.lo_dump = NULL,
};

#define ALLOCK(lock, ops)					\
	lockdebug_alloc(__func__, __LINE__, lock, ops,		\
	    (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)						\
	lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)					\
	lockdebug_wantlock(__func__, __LINE__, lock,		\
	    (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)					\
	lockdebug_locked(__func__, __LINE__, lock, NULL,	\
	    (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)					\
	lockdebug_unlocked(__func__, __LINE__, lock,		\
	    (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)					\
	lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b)	do {} while (0)
#define FREELOCK(a)	do {} while (0)
#define WANTLOCK(a, b)	do {} while (0)
#define LOCKED(a, b)	do {} while (0)
#define UNLOCKED(a, b)	do {} while (0)
#define BARRIER(a, b)	do {} while (0)
#endif

/*
 * We map locks to pthread routines. The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap. This
 * indirection is necessary because we don't know the size of
 * pthread objects here. It is also beneficial, since it lets us
 * easily remain compatible with the kernel ABI: all kernel lock
 * objects, regardless of machine architecture, are always at least
 * the size of a pointer. The downside, of course, is a performance
 * penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx *const*)(mtx))
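
/*
 * Illustrative sketch of the indirection (an editorial example, not part
 * of the original source): the kernel-visible kmutex_t is treated as
 * opaque storage whose first pointer-sized word holds the pointer to the
 * heap-allocated hypervisor object. Conceptually:
 *
 *	kmutex_t km;
 *	rumpuser_mutex_init((struct rumpuser_mtx **)&km, RUMPUSER_MTX_KMUTEX);
 *	struct rumpuser_mtx *hypermtx = RUMPMTX(&km);	<- reads that pointer
 *
 * Every later operation on &km simply follows this one pointer.
 */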

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	int ruflags = RUMPUSER_MTX_KMUTEX;
	int isspin;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	/*
	 * Try to figure out if the caller wanted a spin mutex or
	 * not with this easy set of conditionals. The difference
	 * between a spin mutex and an adaptive mutex for a rump
	 * kernel is that the hypervisor does not relinquish the
	 * rump kernel CPU context for a spin mutex. The
	 * hypervisor itself may block even when "spinning".
	 */
	if (type == MUTEX_SPIN) {
		isspin = 1;
	} else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
	    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
	    ipl == IPL_SOFTSERIAL) {
		isspin = 0;
	} else {
		isspin = 1;
	}

	if (isspin)
		ruflags |= RUMPUSER_MTX_SPIN;
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
	if (isspin)
		ALLOCK(mtx, &mutex_spin_lockops);
	else
		ALLOCK(mtx, &mutex_adaptive_lockops);
}
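
/*
 * Illustrative examples of the spin/adaptive selection in mutex_init()
 * above (an editorial sketch, not part of the original source; the IPLs
 * are the standard NetBSD ones):
 *
 *	mutex_init(&m, MUTEX_SPIN,    IPL_HIGH);	-> spin mutex
 *	mutex_init(&m, MUTEX_DEFAULT, IPL_NONE);	-> adaptive mutex
 *	mutex_init(&m, MUTEX_DEFAULT, IPL_SOFTNET);	-> adaptive mutex
 *	mutex_init(&m, MUTEX_DEFAULT, IPL_VM);		-> spin mutex (not a soft IPL)
 */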

void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, 0);
	if (!rumpuser_mutex_spin_p(RUMPMTX(mtx)))
		BARRIER(mtx, 1);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	KASSERT(rumpuser_mutex_spin_p(RUMPMTX(mtx)));
	WANTLOCK(mtx, 0);
	rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
	LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	int error;

	error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
	if (error == 0) {
		WANTLOCK(mtx, 0);
		LOCKED(mtx, false);
	}
	return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

#ifndef LOCKDEBUG
	KASSERT(mutex_owned(mtx));
#endif
	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	WANTLOCK(mtx, -1);
#endif
	return 1;
}

int
mutex_owned(const kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}

lwp_t *
mutex_owner(const kmutex_t *mtx)
{
	struct lwp *l;

	rumpuser_mutex_owner(RUMPMTX(mtx), &l);
	return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

	switch (op) {
	case RW_READER:
		return RUMPUSER_RW_READER;
	case RW_WRITER:
		return RUMPUSER_RW_WRITER;
	default:
		panic("unknown rwlock type");
	}
}

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	WANTLOCK(rw, op == RW_READER);
	BARRIER(rw, 1);
	rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
	LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	int error;

	error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
	if (error == 0) {
		WANTLOCK(rw, op == RW_READER);
		LOCKED(rw, op == RW_READER);
	}
	return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
	int rv;

	rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
	if (rv == 0) {
		UNLOCKED(rw, 1);
		WANTLOCK(rw, 0);
		LOCKED(rw, 0);
	}
	return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

	rumpuser_rw_downgrade(RUMPRW(rw));
	UNLOCKED(rw, 0);
	WANTLOCK(rw, 1);
	LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_write_held(krwlock_t *rw)
{
	int rv;

	rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
	return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

	return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here: someone might want the CPU
		 * to set a condition. Otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT. If so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove the "reference" to l->l_private */

		mutex_enter(mtx);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
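
/*
 * Worked example of the tick conversion in cv_timedwait() above (an
 * editorial note, not part of the original source), assuming the common
 * hz = 100: ticks = 250 yields ts.tv_sec = 250 / 100 = 2 and
 * ts.tv_nsec = (250 % 100) * (1000000000 / 100) = 500000000, i.e. a
 * 2.5 second timeout. ticks == 0 means "no timeout" and the call
 * degenerates to cv_wait_sig().
 */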

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
	int rv;

	rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
	return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}