/* $NetBSD: locks.c,v 1.14.4.3 2010/03/11 15:04:38 yamt Exp $ */

/*
 * Copyright (c) 2007, 2008 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.14.4.3 2010/03/11 15:04:38 yamt Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * We map locks to pthread routines.  The difference between kernel
 * and rumpuser routines is that while the kernel uses static
 * storage, rumpuser allocates the object from the heap.  This
 * indirection is necessary because we don't know the size of
 * pthread objects here.  It is also beneficial, since it keeps us
 * easily compatible with the kernel ABI: kernel lock objects are,
 * regardless of machine architecture, always at least the size of
 * a pointer.  The downside, of course, is a performance penalty.
 */

#define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx))
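
/*
 * Illustrative sketch, not part of the original file and kept out of
 * the build with #if 0: it only demonstrates what the indirection
 * described above means for a caller.  The caller's statically sized
 * kmutex_t merely stores a pointer; the real rumpuser (pthread) mutex
 * lives on the heap and is reached via RUMPMTX().
 */
#if 0
static void
example_mutex_usage(void)
{
        kmutex_t mtx;

        mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);
        mutex_enter(&mtx);
        /* RUMPMTX(&mtx) now points to the heap-allocated rumpuser mutex */
        mutex_exit(&mtx);
        mutex_destroy(&mtx);
}
#endif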

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

        CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

        rumpuser_mutex_init((struct rumpuser_mtx **)mtx);
}

void
mutex_destroy(kmutex_t *mtx)
{

        rumpuser_mutex_destroy(RUMPMTX(mtx));
}

void
mutex_enter(kmutex_t *mtx)
{

        rumpuser_mutex_enter(RUMPMTX(mtx));
}

void
mutex_spin_enter(kmutex_t *mtx)
{

        mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{

        return rumpuser_mutex_tryenter(RUMPMTX(mtx));
}

void
mutex_exit(kmutex_t *mtx)
{

        rumpuser_mutex_exit(RUMPMTX(mtx));
}

void
mutex_spin_exit(kmutex_t *mtx)
{

        mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{

        return rumpuser_mutex_held(RUMPMTX(mtx));
}

#define RUMPRW(rw) (*(struct rumpuser_rw **)(rw))

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{

        CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

        rumpuser_rw_init((struct rumpuser_rw **)rw);
}

void
rw_destroy(krwlock_t *rw)
{

        rumpuser_rw_destroy(RUMPRW(rw));
}

void
rw_enter(krwlock_t *rw, const krw_t op)
{

        rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{

        return rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER);
}

void
rw_exit(krwlock_t *rw)
{

        rumpuser_rw_exit(RUMPRW(rw));
}

/* always fails */
int
rw_tryupgrade(krwlock_t *rw)
{

        return 0;
}
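
/*
 * Illustrative sketch, not part of the original file and kept out of
 * the build with #if 0.  Since rw_tryupgrade() above never succeeds,
 * a hypothetical caller that needs a write lock has to fall back to
 * the usual drop-and-relock sequence:
 */
#if 0
static void
example_rw_upgrade(krwlock_t *rw)
{

        if (!rw_tryupgrade(rw)) {
                rw_exit(rw);
                rw_enter(rw, RW_WRITER);
                /* anything read under the reader lock must be revalidated */
        }
}
#endif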

int
rw_write_held(krwlock_t *rw)
{

        return rumpuser_rw_wrheld(RUMPRW(rw));
}

int
rw_read_held(krwlock_t *rw)
{

        return rumpuser_rw_rdheld(RUMPRW(rw));
}

int
rw_lock_held(krwlock_t *rw)
{

        return rumpuser_rw_held(RUMPRW(rw));
}

/* curriculum vitaes */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

        CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

        rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

        rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

        if (rump_threads == 0)
                panic("cv_wait without threads");
        rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
}

int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

        rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
        return 0;
}

int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
        struct timespec ts, tick;
        extern int hz;

        /* a timeout of 0 ticks means "wait forever" */
        if (ticks == 0) {
                cv_wait(cv, mtx);
                return 0;
        }

        /* convert the relative tick count into an absolute wakeup time */
        nanotime(&ts);
        tick.tv_sec = ticks / hz;
        tick.tv_nsec = (ticks % hz) * (1000000000/hz);
        timespecadd(&ts, &tick, &ts);

        if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
            ts.tv_sec, ts.tv_nsec))
                return EWOULDBLOCK;
        else
                return 0;
}
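
/*
 * Illustrative sketch, not part of the original file and kept out of
 * the build with #if 0.  With the conversion above, hz == 100 and
 * ticks == 150, for example, yield a relative timeout of 1 s +
 * 500000000 ns added to the current time.
 */
#if 0
static int
example_timedwait(kcondvar_t *cv, kmutex_t *mtx)
{
        extern int hz;

        /* wait roughly one second: 0 on wakeup, EWOULDBLOCK on timeout */
        return cv_timedwait(cv, mtx, hz);
}
#endif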

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

        return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

        rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

        rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

        return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

        return RUMPCV(cv) != NULL;
}

/*
 * giant lock
 */

static volatile int lockcnt;

bool
kernel_biglocked(void)
{

        return rumpuser_mutex_held(rump_giantlock) && lockcnt > 0;
}

void
kernel_unlock_allbutone(int *countp)
{
        int minusone = lockcnt-1;

        KASSERT(kernel_biglocked());
        if (minusone) {
                _kernel_unlock(minusone, countp);
        }
        KASSERT(lockcnt == 1);
        *countp = minusone;

        /*
         * We drop lockcnt to 0 since rumpuser doesn't know that the
         * kernel biglock is being used as the interlock for cv in
         * tsleep.
         */
        lockcnt = 0;
}

void
kernel_ununlock_allbutone(int nlocks)
{

        KASSERT(rumpuser_mutex_held(rump_giantlock) && lockcnt == 0);
        lockcnt = 1;
        _kernel_lock(nlocks);
}
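
/*
 * Illustrative sketch, not part of the original file and kept out of
 * the build with #if 0; the assumed caller is the tsleep path
 * mentioned in the comment above.  The pair brackets a sleep that
 * uses the biglock itself as the interlock: all recursive holds but
 * one are released before sleeping and retaken afterwards.
 */
#if 0
static void
example_sleep_on_biglock(struct rumpuser_cv *rucv)
{
        int nlocks;

        kernel_unlock_allbutone(&nlocks);
        rumpuser_cv_wait(rucv, rump_giantlock);
        kernel_ununlock_allbutone(nlocks);
}
#endif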

void
_kernel_lock(int nlocks)
{

        while (nlocks--) {
                if (!rumpuser_mutex_tryenter(rump_giantlock)) {
                        struct lwp *l = curlwp;

                        rump_unschedule_cpu1(l);
                        rumpuser_mutex_enter_nowrap(rump_giantlock);
                        rump_schedule_cpu(l);
                }
                lockcnt++;
        }
}

void
_kernel_unlock(int nlocks, int *countp)
{

        if (!rumpuser_mutex_held(rump_giantlock)) {
                KASSERT(nlocks == 0);
                if (countp)
                        *countp = 0;
                return;
        }

        if (countp)
                *countp = lockcnt;
        if (nlocks == 0)
                nlocks = lockcnt;
        if (nlocks == -1) {
                KASSERT(lockcnt == 1);
                nlocks = 1;
        }
        KASSERT(nlocks <= lockcnt);
        while (nlocks--) {
                lockcnt--;
                rumpuser_mutex_exit(rump_giantlock);
        }
}

void
rump_user_unschedule(int nlocks, int *countp)
{

        _kernel_unlock(nlocks, countp);
        /*
         * XXX: technically we should unschedule_cpu1() here, but that
         * requires rump_intr_enter/exit to be implemented.
         */
        rump_unschedule_cpu(curlwp);
}

void
rump_user_schedule(int nlocks)
{

        rump_schedule_cpu(curlwp);

        if (nlocks)
                _kernel_lock(nlocks);
}
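
/*
 * Illustrative sketch, not part of the original file and kept out of
 * the build with #if 0; the assumed callers are hypercall paths that
 * may block in the host.  A thread gives up its virtual CPU and all
 * of its biglock holds before such a call and reclaims them on return:
 */
#if 0
static void
example_blocking_hostcall(void)
{
        int nlocks;

        rump_user_unschedule(0, &nlocks);       /* 0 releases all holds */
        /* ... potentially blocking call into the host would go here ... */
        rump_user_schedule(nlocks);
}
#endif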