/*	$NetBSD: rumpuser_pth.c,v 1.7 2011/02/05 13:51:56 yamt Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if !defined(lint)
__RCSID("$NetBSD: rumpuser_pth.c,v 1.7 2011/02/05 13:51:56 yamt Exp $");
#endif /* !lint */

#ifdef __linux__
#define _XOPEN_SOURCE 500
#define _BSD_SOURCE
#define _FILE_OFFSET_BITS 64
#endif

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

#include <rump/rumpuser.h>

#include "rumpuser_int.h"

static pthread_key_t curlwpkey;

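/*
 * NOFAIL/NOFAIL_ERRNO: the rumpuser interface gives these operations no
 * way to report failure, so any error is fatal.  NOFAIL aborts if the
 * expression evaluates to false (e.g. a failed malloc); NOFAIL_ERRNO
 * aborts if a pthread call returns a non-zero error number.
 */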
#define NOFAIL(a) do {if (!(a)) abort();} while (/*CONSTCOND*/0)
#define NOFAIL_ERRNO(a)							\
do {									\
	int fail_rv = (a);						\
	if (fail_rv) {							\
		printf("panic: rumpuser fatal failure %d (%s)\n",	\
		    fail_rv, strerror(fail_rv));			\
		abort();						\
	}								\
} while (/*CONSTCOND*/0)

struct rumpuser_mtx {
	pthread_mutex_t pthmtx;
	struct lwp *owner;
	int iskmutex;
};

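/*
 * Reader/writer lock bookkeeping.  pthread rwlocks cannot answer
 * "does the current thread hold this lock?", so we track it ourselves:
 * readers > 0 means read-held, readers == -1 means write-held by
 * "writer".  The reader count is updated under a spinlock since
 * multiple readers may modify it concurrently.
 */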
#define RURW_AMWRITER(rw) (rw->writer == rumpuser_get_curlwp()		\
			    && rw->readers == -1)
#define RURW_HASREAD(rw)  (rw->readers > 0)

#define RURW_SETWRITE(rw)						\
do {									\
	assert(rw->readers == 0);					\
	rw->writer = rumpuser_get_curlwp();				\
	rw->readers = -1;						\
} while (/*CONSTCOND*/0)
#define RURW_CLRWRITE(rw)						\
do {									\
	assert(rw->readers == -1 && RURW_AMWRITER(rw));			\
	rw->readers = 0;						\
} while (/*CONSTCOND*/0)
#define RURW_INCREAD(rw)						\
do {									\
	pthread_spin_lock(&rw->spin);					\
	assert(rw->readers >= 0);					\
	++(rw)->readers;						\
	pthread_spin_unlock(&rw->spin);					\
} while (/*CONSTCOND*/0)
#define RURW_DECREAD(rw)						\
do {									\
	pthread_spin_lock(&rw->spin);					\
	assert(rw->readers > 0);					\
	--(rw)->readers;						\
	pthread_spin_unlock(&rw->spin);					\
} while (/*CONSTCOND*/0)

struct rumpuser_rw {
	pthread_rwlock_t pthrw;
	pthread_spinlock_t spin;
	int readers;
	struct lwp *writer;
};

struct rumpuser_cv {
	pthread_cond_t pthcv;
	int nwaiters;
};

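/*
 * Shared state for async block I/O: a fixed ring of N_AIOS requests,
 * drained by rumpuser_biothread().  The head/tail indices are protected
 * by rumpuser_aio_mtx, and rumpuser_aio_cv is used to wait for new work
 * or for a slot to become free.
 */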
struct rumpuser_mtx rumpuser_aio_mtx;
struct rumpuser_cv rumpuser_aio_cv;
int rumpuser_aio_head, rumpuser_aio_tail;
struct rumpuser_aio rumpuser_aios[N_AIOS];

kernel_lockfn	rumpuser__klock;
kernel_unlockfn	rumpuser__kunlock;
int		rumpuser__wantthreads;

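/*
 * I/O thread: wait for requests to appear in the rumpuser_aios ring,
 * perform the host pread()/pwrite() without holding the rump kernel
 * lock, then report completion via the biodone callback with the
 * kernel lock held.  Advancing the tail frees the slot for the producer.
 */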
void
/*ARGSUSED*/
rumpuser_biothread(void *arg)
{
	struct rumpuser_aio *rua;
	rump_biodone_fn biodone = arg;
	ssize_t rv;
	int error, dummy;

	/* unschedule from CPU.  we reschedule before running the interrupt */
	rumpuser__kunlock(0, &dummy, NULL);
	assert(dummy == 0);

	NOFAIL_ERRNO(pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx));
	for (;;) {
		while (rumpuser_aio_head == rumpuser_aio_tail) {
			NOFAIL_ERRNO(pthread_cond_wait(&rumpuser_aio_cv.pthcv,
			    &rumpuser_aio_mtx.pthmtx));
		}

		rua = &rumpuser_aios[rumpuser_aio_tail];
		assert(rua->rua_bp != NULL);
		pthread_mutex_unlock(&rumpuser_aio_mtx.pthmtx);

		if (rua->rua_op & RUA_OP_READ) {
			error = 0;
			rv = pread(rua->rua_fd, rua->rua_data,
			    rua->rua_dlen, rua->rua_off);
			if (rv < 0) {
				rv = 0;
				error = errno;
			}
		} else {
			error = 0;
			rv = pwrite(rua->rua_fd, rua->rua_data,
			    rua->rua_dlen, rua->rua_off);
			if (rv < 0) {
				rv = 0;
				error = errno;
			} else if (rua->rua_op & RUA_OP_SYNC) {
#ifdef __NetBSD__
				fsync_range(rua->rua_fd, FDATASYNC,
				    rua->rua_off, rua->rua_dlen);
#else
				fsync(rua->rua_fd);
#endif
			}
		}
		rumpuser__klock(0, NULL);
		biodone(rua->rua_bp, (size_t)rv, error);
		rumpuser__kunlock(0, &dummy, NULL);

		rua->rua_bp = NULL;

		NOFAIL_ERRNO(pthread_mutex_lock(&rumpuser_aio_mtx.pthmtx));
		rumpuser_aio_tail = (rumpuser_aio_tail+1) % N_AIOS;
		pthread_cond_signal(&rumpuser_aio_cv.pthcv);
	}

	/*NOTREACHED*/
	fprintf(stderr, "error: rumpuser_biothread reached unreachable\n");
	abort();
}

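/*
 * Initialize the threading hypercalls: set up the aio ring
 * synchronization, create the thread-local storage key used for
 * curlwp tracking, and remember the kernel lock/unlock callbacks used
 * when blocking hypercalls temporarily release the rump kernel lock.
 */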
void
rumpuser_thrinit(kernel_lockfn lockfn, kernel_unlockfn unlockfn, int threads)
{

	pthread_mutex_init(&rumpuser_aio_mtx.pthmtx, NULL);
	pthread_cond_init(&rumpuser_aio_cv.pthcv, NULL);

	pthread_key_create(&curlwpkey, NULL);

	rumpuser__klock = lockfn;
	rumpuser__kunlock = unlockfn;
	rumpuser__wantthreads = threads;
}

#if 0
void
rumpuser__thrdestroy(void)
{

	pthread_key_delete(curlwpkey);
}
#endif

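/*
 * Create a host thread.  A joinable thread gets a heap-allocated
 * pthread_t handed back through ptcookie so that rumpuser_thread_join()
 * can later join it and free the id; a detached thread uses a
 * throwaway local pthread_t.
 */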
int
rumpuser_thread_create(void *(*f)(void *), void *arg, const char *thrname,
	int joinable, void **ptcookie)
{
	pthread_t ptid;
	pthread_t *ptidp;
	pthread_attr_t pattr;
	int rv;

	if ((rv = pthread_attr_init(&pattr)) != 0)
		return rv;

	if (joinable) {
		NOFAIL(ptidp = malloc(sizeof(*ptidp)));
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
	} else {
		ptidp = &ptid;
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_DETACHED);
	}

	rv = pthread_create(ptidp, &pattr, f, arg);
#ifdef __NetBSD__
	/* use *ptidp: for a joinable thread the id is stored there, not in ptid */
	if (rv == 0 && thrname)
		pthread_setname_np(*ptidp, thrname, NULL);
#endif

	if (joinable) {
		assert(ptcookie);
		*ptcookie = ptidp;
	}

	pthread_attr_destroy(&pattr);

	return rv;
}

__dead void
rumpuser_thread_exit(void)
{

	pthread_exit(NULL);
}

int
rumpuser_thread_join(void *ptcookie)
{
	pthread_t *pt = ptcookie;
	int rv;

	KLOCK_WRAP((rv = pthread_join(*pt, NULL)));
	if (rv == 0)
		free(pt);

	return rv;
}

void
rumpuser_mutex_init(struct rumpuser_mtx **mtx)
{
	pthread_mutexattr_t att;

	NOFAIL(*mtx = malloc(sizeof(struct rumpuser_mtx)));

	pthread_mutexattr_init(&att);
	pthread_mutexattr_settype(&att, PTHREAD_MUTEX_ERRORCHECK);
	NOFAIL_ERRNO(pthread_mutex_init(&((*mtx)->pthmtx), &att));
	pthread_mutexattr_destroy(&att);

	(*mtx)->owner = NULL;
	(*mtx)->iskmutex = 0;
}

void
rumpuser_mutex_init_kmutex(struct rumpuser_mtx **mtx)
{

	rumpuser_mutex_init(mtx);
	(*mtx)->iskmutex = 1;
}

static void
mtxenter(struct rumpuser_mtx *mtx)
{

	if (!mtx->iskmutex)
		return;

	assert(mtx->owner == NULL);
	mtx->owner = rumpuser_get_curlwp();
}

static void
mtxexit(struct rumpuser_mtx *mtx)
{

	if (!mtx->iskmutex)
		return;

	assert(mtx->owner != NULL);
	mtx->owner = NULL;
}

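/*
 * Lock a mutex.  Try the lock first without releasing the rump kernel
 * lock; only if that fails do we pay for KLOCK_WRAP, which drops the
 * kernel lock around the blocking pthread_mutex_lock() and reacquires
 * it afterwards.
 */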
void
rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
{

	if (pthread_mutex_trylock(&mtx->pthmtx) != 0)
		KLOCK_WRAP(NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx)));
	mtxenter(mtx);
}

void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx));
	mtxenter(mtx);
}

int
rumpuser_mutex_tryenter(struct rumpuser_mtx *mtx)
{
	int rv;

	rv = pthread_mutex_trylock(&mtx->pthmtx);
	if (rv == 0) {
		mtxenter(mtx);
	}

	return rv == 0;
}

void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
}

void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_destroy(&mtx->pthmtx));
	free(mtx);
}

struct lwp *
rumpuser_mutex_owner(struct rumpuser_mtx *mtx)
{

	if (__predict_false(!mtx->iskmutex)) {
		printf("panic: rumpuser_mutex_held unsupported on non-kmtx\n");
		abort();
	}

	return mtx->owner;
}

void
rumpuser_rw_init(struct rumpuser_rw **rw)
{

	NOFAIL(*rw = malloc(sizeof(struct rumpuser_rw)));
	NOFAIL_ERRNO(pthread_rwlock_init(&((*rw)->pthrw), NULL));
	NOFAIL_ERRNO(pthread_spin_init(&((*rw)->spin), PTHREAD_PROCESS_SHARED));
	(*rw)->readers = 0;
	(*rw)->writer = NULL;
}

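/*
 * rwlock enter follows the same pattern as mutex enter: trylock first
 * to avoid releasing the rump kernel lock in the common uncontended
 * case, fall back to KLOCK_WRAP around the blocking acquire, and
 * update the RURW_* ownership bookkeeping on success.
 */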
void
rumpuser_rw_enter(struct rumpuser_rw *rw, int iswrite)
{

	if (iswrite) {
		if (pthread_rwlock_trywrlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_wrlock(&rw->pthrw)));
		RURW_SETWRITE(rw);
	} else {
		if (pthread_rwlock_tryrdlock(&rw->pthrw) != 0)
			KLOCK_WRAP(NOFAIL_ERRNO(
			    pthread_rwlock_rdlock(&rw->pthrw)));
		RURW_INCREAD(rw);
	}
}

int
rumpuser_rw_tryenter(struct rumpuser_rw *rw, int iswrite)
{
	int rv;

	if (iswrite) {
		rv = pthread_rwlock_trywrlock(&rw->pthrw);
		if (rv == 0)
			RURW_SETWRITE(rw);
	} else {
		rv = pthread_rwlock_tryrdlock(&rw->pthrw);
		if (rv == 0)
			RURW_INCREAD(rw);
	}

	return rv == 0;
}

void
rumpuser_rw_exit(struct rumpuser_rw *rw)
{

	if (RURW_HASREAD(rw))
		RURW_DECREAD(rw);
	else
		RURW_CLRWRITE(rw);
	NOFAIL_ERRNO(pthread_rwlock_unlock(&rw->pthrw));
}

void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	NOFAIL_ERRNO(pthread_rwlock_destroy(&rw->pthrw));
	NOFAIL_ERRNO(pthread_spin_destroy(&rw->spin));
	free(rw);
}

int
rumpuser_rw_held(struct rumpuser_rw *rw)
{

	return rw->readers != 0;
}

int
rumpuser_rw_rdheld(struct rumpuser_rw *rw)
{

	return RURW_HASREAD(rw);
}

int
rumpuser_rw_wrheld(struct rumpuser_rw *rw)
{

	return RURW_AMWRITER(rw);
}

void
rumpuser_cv_init(struct rumpuser_cv **cv)
{

	NOFAIL(*cv = malloc(sizeof(struct rumpuser_cv)));
	NOFAIL_ERRNO(pthread_cond_init(&((*cv)->pthcv), NULL));
	(*cv)->nwaiters = 0;
}

void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_destroy(&cv->pthcv));
	free(cv);
}

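/*
 * Condition variable wait.  The caller holds mtx; before blocking we
 * release the rump kernel lock (remembering in nlocks how many times
 * it was held) and clear the mutex ownership bookkeeping, and we
 * restore both once pthread_cond_wait() returns with the mutex
 * re-locked.
 */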
void
rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{
	int nlocks;

	cv->nwaiters++;
	rumpuser__kunlock(0, &nlocks, mtx);
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	rumpuser__klock(nlocks, mtx);
	cv->nwaiters--;
}

void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	cv->nwaiters--;
}

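/*
 * Timed variant of the above.  sec/nsec are passed straight through to
 * pthread_cond_timedwait() as an absolute timespec; a timeout is
 * reported by returning 1, and any other error is fatal.
 */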
int
rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
	int64_t sec, int64_t nsec)
{
	struct timespec ts;
	int rv, nlocks;

	/* LINTED */
	ts.tv_sec = sec; ts.tv_nsec = nsec;

	cv->nwaiters++;
	rumpuser__kunlock(0, &nlocks, mtx);
	mtxexit(mtx);
	rv = pthread_cond_timedwait(&cv->pthcv, &mtx->pthmtx, &ts);
	mtxenter(mtx);
	rumpuser__klock(nlocks, mtx);
	cv->nwaiters--;
	if (rv != 0 && rv != ETIMEDOUT)
		abort();

	return rv == ETIMEDOUT;
}

void
rumpuser_cv_signal(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_signal(&cv->pthcv));
}

void
rumpuser_cv_broadcast(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_broadcast(&cv->pthcv));
}

int
rumpuser_cv_has_waiters(struct rumpuser_cv *cv)
{

	return cv->nwaiters;
}

/*
 * curlwp
 */

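/*
 * The current lwp ("curlwp") is tracked per host thread via a pthread
 * TLS key.  The assert in rumpuser_set_curlwp() enforces that a thread
 * only installs a new lwp when it currently has none, i.e. the value
 * must be cleared (set to NULL) before being replaced.
 */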
void
rumpuser_set_curlwp(struct lwp *l)
{

	assert(pthread_getspecific(curlwpkey) == NULL || l == NULL);
	pthread_setspecific(curlwpkey, l);
}

struct lwp *
rumpuser_get_curlwp(void)
{

	return pthread_getspecific(curlwpkey);
}