/*	$NetBSD: locks.c,v 1.22 2008/12/13 15:34:48 pooka Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since it keeps us
 * compatible with the kernel ABI: all kernel lock objects, regardless
 * of machine architecture, are at least the size of a pointer.  The
 * downside, of course, is a performance penalty.
 */

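/*
 * To illustrate (a sketch, not compiled code): after
 *
 *	kmutex_t km;
 *	mutex_init(&km, MUTEX_DEFAULT, IPL_NONE);
 *
 * the storage of km holds just a pointer to the heap-allocated
 * struct rumpuser_mtx, which the RUMPMTX() accessor below digs out.
 */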
#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}

void
mutex_destroy(kmutex_t *mtx)
{

	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	rumpuser_mutex_enter(RUMPMTX(mtx));
}

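/*
 * There is nothing to spin against in a userspace rump kernel, so
 * "spin" mutexes are backed by the same adaptive rumpuser mutexes
 * as regular ones.  RUMP_LMUTEX_MAGIC is a sentinel value with no
 * lock object behind it and is simply skipped.
 */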
void
mutex_spin_enter(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

	return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{

	return rumpuser_mutex_held(RUMPMTX(mtx));
}

/* reader/writer locks */

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

	return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

	rumpuser_rw_exit(RUMPRW(rw));
}

/*
 * Always fails: the underlying rumpuser (i.e. pthread) rwlocks
 * provide no read-to-write upgrade primitive, so callers must drop
 * the read lock and reacquire it for writing.
 */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* condition variables ("curriculum vitaes") */

/*
 * Forgive me for I have sinned: the pointer to the heap-allocated
 * rumpuser_cv is stashed in the cv_wmesg member of kcondvar_t,
 * with __UNCONST() papering over the member's constness.
 */
#define RUMPCV(a) ((struct rumpuser_cv *)(__UNCONST((a)->cv_wmesg)))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	rumpuser_cv_init((struct rumpuser_cv **)__UNCONST(&cv->cv_wmesg));
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}

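/*
 * No signal delivery interrupts a wait inside a rump kernel, so the
 * _sig variant degenerates to a plain cv_wait() that always reports
 * success.
 */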
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	return 0;
}

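/*
 * A timeout of 0 ticks means "no timeout", i.e. a plain cv_wait().
 * Otherwise the tick count is passed to rumpuser unconverted, which
 * is valid only while hz is 100; the KASSERT guards that assumption.
 */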
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
#ifdef DIAGNOSTIC
	extern int hz;
#endif

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		KASSERT(hz == 100);
		return rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx), ticks);
	}
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/*
 * giant lock
 */

static volatile int lockcnt;

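/*
 * The giant lock may be taken recursively; lockcnt tracks the
 * current recursion depth so that _kernel_unlock() is able to
 * release every level on request.
 */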
void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		rumpuser_mutex_enter(rump_giantlock);
		lockcnt++;
	}
}

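/*
 * Release nlocks levels of the giant lock, reporting the previous
 * depth via countp.  nlocks == 0 means "release every level held";
 * nlocks == -1 means "release exactly one", asserting that one level
 * is all we hold.  Calling without the lock held is tolerated as
 * long as no release is requested.
 */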
void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

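/*
 * Reference-counted mutex objects.  The kmutex_t lives at the head
 * of the structure so that the result of mutex_obj_alloc() can be
 * used anywhere a plain kmutex_t * is expected.
 */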
struct kmutexobj {
	kmutex_t	mo_lock;
	u_int		mo_refcnt;
};

kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = kmem_alloc(sizeof(*mo), KM_SLEEP);
	mutex_init(&mo->mo_lock, type, ipl);
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}

void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	atomic_inc_uint(&mo->mo_refcnt);
}

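/*
 * Drop a reference.  The lock is destroyed, the object freed, and
 * true returned only when the final reference goes away.
 */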
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	kmem_free(mo, sizeof(*mo));
	return true;
}