/*	$NetBSD: locks.c,v 1.34 2009/11/11 16:46:50 pooka Exp $	*/

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.34 2009/11/11 16:46:50 pooka Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"
/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since it keeps us
 * easily compatible with the kernel ABI: kernel lock objects are,
 * regardless of machine architecture, always at least the size of
 * a pointer.  The downside, of course, is a performance penalty.
 */
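
/*
 * To illustrate the indirection, a minimal sketch of what the
 * hypervisor side is assumed to do (not the actual rumpuser
 * implementation; names and layout are illustrative only):
 *
 *	struct rumpuser_mtx {
 *		pthread_mutex_t pthmtx;
 *	};
 *
 *	void
 *	rumpuser_mutex_init(struct rumpuser_mtx **mtxp)
 *	{
 *		struct rumpuser_mtx *mtx;
 *
 *		mtx = malloc(sizeof(*mtx));
 *		pthread_mutex_init(&mtx->pthmtx, NULL);
 *		*mtxp = mtx;
 *	}
 *
 * The kernel-side kmutex_t is thus used only as pointer-sized
 * storage: its first word holds the address of the heap-allocated
 * rumpuser object, which the RUMPMTX() macro below fetches back.
 */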

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))

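/*
 * The type and ipl arguments are ignored: every mutex is backed by
 * the same kind of rumpuser (i.e. host pthread) mutex.
 */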
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}

void
mutex_destroy(kmutex_t *mtx)
{

	rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

	rumpuser_mutex_enter(RUMPMTX(mtx));
}

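/*
 * Spin mutexes are regular mutexes in a rump kernel.  The
 * RUMP_LMUTEX_MAGIC cookie marks a lock which requires no action
 * here and is therefore skipped.
 */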
void
mutex_spin_enter(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

	return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

	rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	if (__predict_true(mtx != RUMP_LMUTEX_MAGIC))
		mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{

	return rumpuser_mutex_held(RUMPMTX(mtx));
}

/* reader/writer locks */

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

void
rw_init(krwlock_t *rw)
{

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

	rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

	return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

	rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}

int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}

/* curricula vitae, i.e. condition variables */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	if (rump_threads == 0)
		panic("cv_wait without threads");
	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}

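/*
 * Signals are not emulated, so the _sig variant can never be
 * interrupted: wait to completion and always return 0.
 */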
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tick;
	extern int hz;

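	/*
	 * Convert the relative timeout in ticks to the absolute
	 * wakeup time which rumpuser_cv_timedwait() expects.
	 */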
	nanotime(&ts);
	tick.tv_sec = ticks / hz;
	tick.tv_nsec = (ticks % hz) * (1000000000/hz);
	timespecadd(&ts, &tick, &ts);

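	/* ticks == 0 means "no timeout": block until signalled */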
	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	} else {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts.tv_sec, ts.tv_nsec))
			return EWOULDBLOCK;
		else
			return 0;
	}
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/*
 * Giant lock: the biglock can be taken recursively, so lockcnt
 * tracks the number of holds, allowing all of them to be dropped
 * and later restored around blocking points.
 */

static volatile int lockcnt;

bool
kernel_biglocked(void)
{

	return rumpuser_mutex_held(rump_giantlock) && lockcnt > 0;
}

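/*
 * Drop all but one hold of the biglock, reporting the number of
 * dropped holds via countp so that the caller can later restore
 * them with kernel_ununlock_allbutone().
 */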
void
kernel_unlock_allbutone(int *countp)
{
	int minusone = lockcnt - 1;

	KASSERT(kernel_biglocked());
	if (minusone) {
		_kernel_unlock(minusone, countp);
	}
	KASSERT(lockcnt == 1);
	*countp = minusone;

	/*
	 * We drop lockcnt to 0 since rumpuser doesn't know that the
	 * kernel biglock is being used as the interlock for cv in
	 * tsleep.
	 */
	lockcnt = 0;
}

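/*
 * Restore the biglock state after kernel_unlock_allbutone():
 * rumpuser has reacquired the one remaining hold as the cv
 * interlock, so fix up lockcnt for it and then reacquire the
 * dropped holds.  A caller (e.g. the rump tsleep implementation)
 * is assumed to do roughly:
 *
 *	kernel_unlock_allbutone(&nlocks);
 *	rumpuser_cv_wait(cv, rump_giantlock);
 *	kernel_ununlock_allbutone(nlocks);
 */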
void
kernel_ununlock_allbutone(int nlocks)
{

	KASSERT(rumpuser_mutex_held(rump_giantlock) && lockcnt == 0);
	lockcnt = 1;
	_kernel_lock(nlocks);
}

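/*
 * Acquire nlocks holds of the biglock.  If the lock is contested,
 * release the virtual CPU while blocking so that other threads can
 * run in the rump kernel.
 */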
void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		if (!rumpuser_mutex_tryenter(rump_giantlock)) {
			struct lwp *l = curlwp;

			rump_unschedule_cpu(l);
			rumpuser_mutex_enter_nowrap(rump_giantlock);
			rump_schedule_cpu(l);
		}
		lockcnt++;
	}
}

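/*
 * Release biglock holds: nlocks == 0 means release all holds, and
 * nlocks == -1 asserts that exactly one hold exists and releases
 * it.  If countp is non-NULL, the number of holds at entry is
 * stored there.  Calling without the biglock held is allowed and
 * reports zero holds.
 */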
void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

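/*
 * Unschedule from the rump kernel before blocking in the host:
 * drop any biglock holds (reported via countp) and release the
 * virtual CPU.
 */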
void
rump_user_unschedule(int nlocks, int *countp)
{

	_kernel_unlock(nlocks, countp);
	rump_unschedule_cpu(curlwp);
}

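/*
 * Return to the rump kernel after a host block: reacquire the
 * virtual CPU and any biglock holds dropped at unschedule time.
 */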
void
rump_user_schedule(int nlocks)
{

	rump_schedule_cpu(curlwp);

	if (nlocks)
		_kernel_lock(nlocks);
}