/*	$NetBSD: locks.c,v 1.73.4.1 2017/04/30 04:56:55 pgoyette Exp $	*/

/*
 * Copyright (c) 2007-2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.73.4.1 2017/04/30 04:56:55 pgoyette Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

#ifdef LOCKDEBUG
const int rump_lockdebug = 1;
#else
const int rump_lockdebug = 0;
#endif

/*
 * Simple lockdebug.  If it's compiled in, it's always active.
 * Currently available only for mtx/rwlock.
 */
#ifdef LOCKDEBUG
#include <sys/lockdebug.h>

static lockops_t mutex_lockops = {
        "mutex",
        LOCKOPS_SLEEP,
        NULL
};
static lockops_t rw_lockops = {
        "rwlock",
        LOCKOPS_SLEEP,
        NULL
};

#define ALLOCK(lock, ops)                                       \
        lockdebug_alloc(__func__, __LINE__, lock, ops,          \
            (uintptr_t)__builtin_return_address(0))
#define FREELOCK(lock)                                          \
        lockdebug_free(__func__, __LINE__, lock)
#define WANTLOCK(lock, shar)                                    \
        lockdebug_wantlock(__func__, __LINE__, lock,            \
            (uintptr_t)__builtin_return_address(0), shar)
#define LOCKED(lock, shar)                                      \
        lockdebug_locked(__func__, __LINE__, lock, NULL,        \
            (uintptr_t)__builtin_return_address(0), shar)
#define UNLOCKED(lock, shar)                                    \
        lockdebug_unlocked(__func__, __LINE__, lock,            \
            (uintptr_t)__builtin_return_address(0), shar)
#define BARRIER(lock, slp)                                      \
        lockdebug_barrier(__func__, __LINE__, lock, slp)
#else
#define ALLOCK(a, b)
#define FREELOCK(a)
#define WANTLOCK(a, b)
#define LOCKED(a, b)
#define UNLOCKED(a, b)
#define BARRIER(a, b)
#endif
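
/*
 * Editor's note (illustrative, not part of the original source):
 * with LOCKDEBUG compiled in, the wrapper macros above record every
 * lock operation with the lockdebug facility.  For example, in
 * mutex_enter() below,
 *
 *	WANTLOCK(mtx, 0);
 *
 * becomes a call to lockdebug_wantlock() carrying __func__, __LINE__
 * and the caller's return address.  Without LOCKDEBUG every macro
 * expands to nothing, so the only cost of a lock operation is the
 * corresponding rumpuser call.
 */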

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since we can
 * be easily compatible with the kernel ABI because all kernel
 * objects regardless of machine architecture are always at least
 * the size of a pointer.  The downside, of course, is a performance
 * penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
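
/*
 * Editor's illustrative sketch (not from the original source): the
 * caller-supplied kmutex_t is used only as pointer-sized storage.
 * mutex_init() below hands it to rumpuser_mutex_init(), which
 * allocates a struct rumpuser_mtx on the heap and stores its address
 * in the first pointer-sized slot of the kmutex_t; RUMPMTX() simply
 * reads that pointer back for the other rumpuser calls:
 *
 *	kmutex_t km;
 *	mutex_init(&km, MUTEX_DEFAULT, IPL_NONE);
 *	... RUMPMTX(&km) now yields the backing struct rumpuser_mtx * ...
 *
 * The same scheme is used for krwlock_t (RUMPRW) and kcondvar_t
 * (RUMPCV) further down in this file.
 */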

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
        int ruflags = RUMPUSER_MTX_KMUTEX;
        int isspin;

        CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

        /*
         * Try to figure out if the caller wanted a spin mutex or
         * not with this easy set of conditionals.  The difference
         * between a spin mutex and an adaptive mutex for a rump
         * kernel is that the hypervisor does not relinquish the
         * rump kernel CPU context for a spin mutex.  The
         * hypervisor itself may block even when "spinning".
         */
        if (type == MUTEX_SPIN) {
                isspin = 1;
        } else if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
            ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
            ipl == IPL_SOFTSERIAL) {
                isspin = 0;
        } else {
                isspin = 1;
        }

        if (isspin)
                ruflags |= RUMPUSER_MTX_SPIN;
        rumpuser_mutex_init((struct rumpuser_mtx **)mtx, ruflags);
        ALLOCK(mtx, &mutex_lockops);
}
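
/*
 * Editor's illustrative examples (not part of the original source),
 * following the conditionals in mutex_init() above:
 *
 *	mutex_init(&m, MUTEX_DEFAULT, IPL_NONE);	adaptive mutex
 *	mutex_init(&m, MUTEX_DEFAULT, IPL_SOFTNET);	adaptive mutex
 *	mutex_init(&m, MUTEX_DEFAULT, IPL_VM);		RUMPUSER_MTX_SPIN
 *	mutex_init(&m, MUTEX_SPIN, IPL_HIGH);		RUMPUSER_MTX_SPIN
 *
 * "Spin" here only means that the hypervisor does not give up the
 * rump kernel CPU context while waiting; the hypervisor itself may
 * still block.
 */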

void
mutex_destroy(kmutex_t *mtx)
{

        FREELOCK(mtx);
        rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

        WANTLOCK(mtx, 0);
        BARRIER(mtx, 1);
        rumpuser_mutex_enter(RUMPMTX(mtx));
        LOCKED(mtx, false);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

        WANTLOCK(mtx, 0);
        BARRIER(mtx, 1);
        rumpuser_mutex_enter_nowrap(RUMPMTX(mtx));
        LOCKED(mtx, false);
}

int
mutex_tryenter(kmutex_t *mtx)
{
        int error;

        error = rumpuser_mutex_tryenter(RUMPMTX(mtx));
        if (error == 0) {
                WANTLOCK(mtx, 0);
                LOCKED(mtx, false);
        }
        return error == 0;
}

void
mutex_exit(kmutex_t *mtx)
{

        UNLOCKED(mtx, false);
        rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);

int
mutex_ownable(kmutex_t *mtx)
{

#ifdef RUMP_LOCKDEBUG
        mutex_enter(mtx);
        mutex_exit(mtx);
#endif
        return 1;
}

int
mutex_owned(kmutex_t *mtx)
{

        return mutex_owner(mtx) == curlwp;
}

struct lwp *
mutex_owner(kmutex_t *mtx)
{
        struct lwp *l;

        rumpuser_mutex_owner(RUMPMTX(mtx), &l);
        return l;
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

static enum rumprwlock
krw2rumprw(const krw_t op)
{

        switch (op) {
        case RW_READER:
                return RUMPUSER_RW_READER;
        case RW_WRITER:
                return RUMPUSER_RW_WRITER;
        default:
                panic("unknown rwlock type");
        }
}

void
rw_init(krwlock_t *rw)
{

        CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

        rumpuser_rw_init((struct rumpuser_rw **)rw);
        ALLOCK(rw, &rw_lockops);
}

void
rw_destroy(krwlock_t *rw)
{

        FREELOCK(rw);
        rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

        WANTLOCK(rw, op == RW_READER);
        BARRIER(rw, 1);
        rumpuser_rw_enter(krw2rumprw(op), RUMPRW(rw));
        LOCKED(rw, op == RW_READER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
        int error;

        error = rumpuser_rw_tryenter(krw2rumprw(op), RUMPRW(rw));
        if (error == 0) {
                WANTLOCK(rw, op == RW_READER);
                LOCKED(rw, op == RW_READER);
        }
        return error == 0;
}

void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
        bool shared = !rw_write_held(rw);

        if (shared)
                KASSERT(rw_read_held(rw));
        UNLOCKED(rw, shared);
#endif
        rumpuser_rw_exit(RUMPRW(rw));
}

int
rw_tryupgrade(krwlock_t *rw)
{
        int rv;

        rv = rumpuser_rw_tryupgrade(RUMPRW(rw));
        if (rv == 0) {
                UNLOCKED(rw, 1);
                WANTLOCK(rw, 0);
                LOCKED(rw, 0);
        }
        return rv == 0;
}

void
rw_downgrade(krwlock_t *rw)
{

        rumpuser_rw_downgrade(RUMPRW(rw));
        UNLOCKED(rw, 0);
        WANTLOCK(rw, 1);
        LOCKED(rw, 1);
}

int
rw_read_held(krwlock_t *rw)
{
        int rv;

        rumpuser_rw_held(RUMPUSER_RW_READER, RUMPRW(rw), &rv);
        return rv;
}

int
rw_write_held(krwlock_t *rw)
{
        int rv;

        rumpuser_rw_held(RUMPUSER_RW_WRITER, RUMPRW(rw), &rv);
        return rv;
}

int
rw_lock_held(krwlock_t *rw)
{

        return rw_read_held(rw) || rw_write_held(rw);
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

        CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

        rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

        rumpuser_cv_destroy(RUMPCV(cv));
}

static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
        struct lwp *l = curlwp;
        int rv;

        if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
                /*
                 * yield() here, someone might want the cpu
                 * to set a condition.  otherwise we'll just
                 * loop forever.
                 */
                yield();
                return EINTR;
        }

        UNLOCKED(mtx, false);

        l->l_private = cv;
        rv = 0;
        if (ts) {
                if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
                    ts->tv_sec, ts->tv_nsec))
                        rv = EWOULDBLOCK;
        } else {
                rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
        }

        LOCKED(mtx, false);

        /*
         * Check for QEXIT.  if so, we need to wait here until we
         * are allowed to exit.
         */
        if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
                struct proc *p = l->l_proc;

                mutex_exit(mtx); /* drop and retake later */

                mutex_enter(p->p_lock);
                while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
                        /* avoid recursion */
                        rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
                            RUMPMTX(p->p_lock));
                }
                KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
                mutex_exit(p->p_lock);

                /* ok, we can exit and remove "reference" to l->private */

                mutex_enter(mtx);
                rv = EINTR;
        }
        l->l_private = NULL;

        return rv;
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

        if (__predict_false(rump_threads == 0))
                panic("cv_wait without threads");
        (void) docvwait(cv, mtx, NULL);
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

        if (__predict_false(rump_threads == 0))
                panic("cv_wait without threads");
        return docvwait(cv, mtx, NULL);
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
        struct timespec ts;
        extern int hz;
        int rv;

        if (ticks == 0) {
                rv = cv_wait_sig(cv, mtx);
        } else {
                ts.tv_sec = ticks / hz;
                ts.tv_nsec = (ticks % hz) * (1000000000/hz);
                rv = docvwait(cv, mtx, &ts);
        }

        return rv;
}
__strong_alias(cv_timedwait_sig,cv_timedwait);
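
/*
 * Editor's note (illustrative, assuming hz = 100): cv_timedwait()
 * above converts a relative tick count into a timespec before
 * calling docvwait(), e.g. ticks = 150 gives
 *
 *	ts.tv_sec  = 150 / 100                = 1
 *	ts.tv_nsec = (150 % 100) * 10000000   = 500000000
 *
 * i.e. a 1.5 second timeout; ticks = 0 means "wait forever".
 */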

void
cv_signal(kcondvar_t *cv)
{

        rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

        rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{
        int rv;

        rumpuser_cv_has_waiters(RUMPCV(cv), &rv);
        return rv != 0;
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

        return RUMPCV(cv) != NULL;
}