/*	$NetBSD: locks.c,v 1.31 2009/10/15 23:15:55 pooka Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.31 2009/10/15 23:15:55 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
/*
 * We map kernel locks to rumpuser (i.e. pthread) routines.  The
 * difference between the kernel and rumpuser interfaces is that
 * while the kernel uses static storage for its lock objects,
 * rumpuser allocates them from the heap.  This indirection is
 * necessary because we do not know the size of the pthread objects
 * here.  It is also beneficial: it lets us easily remain compatible
 * with the kernel ABI, since kernel lock objects are, regardless of
 * machine architecture, always at least the size of a pointer.  The
 * downside, of course, is a performance penalty.
 */
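/*
 * To illustrate the indirection, a pthread-backed rumpuser
 * implementation might look roughly like the sketch below.  This is
 * purely illustrative: the real routines live in librumpuser and
 * their details may differ.
 *
 *	struct rumpuser_mtx {
 *		pthread_mutex_t pmtx;
 *	};
 *
 *	void
 *	rumpuser_mutex_init(struct rumpuser_mtx **mtxp)
 *	{
 *		struct rumpuser_mtx *mtx;
 *
 *		mtx = malloc(sizeof(*mtx));
 *		pthread_mutex_init(&mtx->pmtx, NULL);
 *		*mtxp = mtx;
 *	}
 *
 * The kernel-side kmutex_t then merely stores the returned pointer,
 * which is why the RUMPMTX() accessor below is a plain dereference.
 */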

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

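	/*
	 * Note: the type and ipl arguments are ignored here; every
	 * mutex maps to the same kind of rumpuser mutex.
	 */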
	rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}

void
mutex_destroy(kmutex_t *mtx)
{

	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	rumpuser_mutex_enter(RUMPMTX(mtx));
}

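/*
 * Spin mutexes do not actually spin in a rump kernel: they are backed
 * by the same rumpuser mutexes as adaptive ones.  RUMP_LMUTEX_MAGIC
 * (from rump_private.h) appears to stand in for a mutex with no
 * backing lock object; entering or exiting it is simply skipped.
 */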
void
mutex_spin_enter(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

	return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{

	return rumpuser_mutex_held(RUMPMTX(mtx));
}

/* reader/writer locks */

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))
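
/*
 * Usage of these wrappers mirrors the native kernel interface, e.g.
 * (a generic sketch with a hypothetical softc):
 *
 *	rw_enter(&sc->sc_lock, RW_READER);
 *	... examine shared state ...
 *	rw_exit(&sc->sc_lock);
 *
 * The op argument collapses to a single "is this a write lock"
 * boolean for rumpuser.
 */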

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

	return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

	rumpuser_rw_exit(RUMPRW(rw));
}

/*
 * Upgrading a read hold to a write hold is not supported by the
 * rumpuser backend, so this always fails.  That is legal: callers
 * of rw_tryupgrade() must be prepared for failure anyway.
 */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* condition variables, a.k.a. "curriculum vitaes" */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	/* the msg argument is not used */
	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (rump_threads == 0)
		panic("cv_wait without threads");
	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}
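
/*
 * The canonical consumer pattern is the same as in the native kernel
 * (a generic sketch with a hypothetical softc):
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	mutex_exit(&sc->sc_lock);
 *
 * The interlock is atomically released for the duration of the sleep
 * and reacquired before cv_wait() returns; the rumpuser backend must
 * provide the same guarantee.
 */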

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	/* the wait is never interrupted by a signal here, so always 0 */
	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	}

	/* convert the relative timeout in ticks to an absolute timespec */
	nanotime(&ts);
	tick.tv_sec = ticks / hz;
	tick.tv_nsec = (ticks % hz) * (1000000000/hz);
	timespecadd(&ts, &tick, &ts);

	return rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx), &ts);
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/*
 * The giant lock ("biglock"): a recursive lock.  lockcnt tracks the
 * recursion depth so that all levels can be dropped and later
 * restored around operations which must block.
 */

static volatile int lockcnt;

bool
kernel_biglocked(void)
{

	return rumpuser_mutex_held(rump_giantlock) && lockcnt > 0;
}

void
kernel_unlock_allbutone(int *countp)
{
	int minusone = lockcnt-1;

	KASSERT(kernel_biglocked());
	if (minusone) {
		_kernel_unlock(minusone, countp);
	}
	KASSERT(lockcnt == 1);
	*countp = minusone;

	/*
	 * We drop lockcnt to 0 since rumpuser doesn't know that the
	 * kernel biglock is being used as the interlock for cv in
	 * tsleep.
	 */
	lockcnt = 0;
}

void
kernel_ununlock_allbutone(int nlocks)
{

	KASSERT(rumpuser_mutex_held(rump_giantlock) && lockcnt == 0);
	lockcnt = 1;
	_kernel_lock(nlocks);
}
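
/*
 * A sketch of the intended pairing (hypothetical caller; cf. the
 * tsleep emulation, which uses the biglock as the cv interlock):
 *
 *	int nlocks;
 *
 *	kernel_unlock_allbutone(&nlocks);
 *	... sleep with exactly one biglock level held ...
 *	kernel_ununlock_allbutone(nlocks);
 *
 * This restores the biglock to its recursion depth from before the
 * sleep.
 */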

void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		if (!rumpuser_mutex_tryenter(rump_giantlock)) {
			struct lwp *l = curlwp;

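			/*
			 * Contested: surrender the virtual CPU while
			 * we block on the biglock so that other
			 * threads can run, then get a CPU back.
			 */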
			rump_unschedule_cpu(l);
			rumpuser_mutex_enter_nowrap(rump_giantlock);
			l->l_cpu = rump_schedule_cpu();
		}
		lockcnt++;
	}
}

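/*
 * Release the biglock nlocks times, reporting the previous recursion
 * depth via countp if it is non-NULL.  nlocks == 0 means "release
 * every level currently held"; nlocks == -1 asserts that exactly one
 * level is held and releases it.
 */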
void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

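/*
 * Hooks run when a thread crosses the rump kernel boundary: leaving
 * drops the biglock levels held and the virtual CPU, and entering
 * acquires them back.
 */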
void
rump_user_unschedule(int nlocks, int *countp)
{

	_kernel_unlock(nlocks, countp);
	rump_unschedule_cpu(curlwp);
}

void
rump_user_schedule(int nlocks)
{

	curlwp->l_cpu = rump_schedule_cpu();

	if (nlocks)
		_kernel_lock(nlocks);
}

/*
 * Reference-counted mutex objects (the mutex_obj interface): the
 * mutex is destroyed only when the last reference is dropped.
 */
struct kmutexobj {
	kmutex_t	mo_lock;
	u_int		mo_refcnt;
};

kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = kmem_alloc(sizeof(*mo), KM_SLEEP);
	mutex_init(&mo->mo_lock, type, ipl);
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}

void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	atomic_inc_uint(&mo->mo_refcnt);
}

bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	kmem_free(mo, sizeof(*mo));
	return true;
}
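
/*
 * Typical use (a generic sketch): several owners can share one lock
 * without tracking who must destroy it last.
 *
 *	lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	mutex_obj_hold(lock);		(take a second reference)
 *	...
 *	if (mutex_obj_free(lock))	(true once the last ref is gone)
 *		(the mutex has now been destroyed and freed)
 */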