rumpuser_pth.c revision 1.23 1 /* $NetBSD: rumpuser_pth.c,v 1.23 2013/05/02 19:14:59 pooka Exp $ */
2
3 /*
4 * Copyright (c) 2007-2010 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include "rumpuser_port.h"
29
30 #if !defined(lint)
31 __RCSID("$NetBSD: rumpuser_pth.c,v 1.23 2013/05/02 19:14:59 pooka Exp $");
32 #endif /* !lint */
33
34 #include <sys/queue.h>
35
36 #include <assert.h>
37 #include <errno.h>
38 #include <fcntl.h>
39 #include <pthread.h>
40 #include <stdlib.h>
41 #include <stdio.h>
42 #include <string.h>
43 #include <stdint.h>
44 #include <unistd.h>
45
46 #include <rump/rumpuser.h>
47
48 #include "rumpuser_int.h"
49
/* thread-specific-data key holding the current lwp (see curlwp ops below) */
static pthread_key_t curlwpkey;

/*
 * Hypervisor mutex.  Wraps a pthread mutex; when the mutex backs a
 * kernel kmutex (RUMPUSER_MTX_KMUTEX in flags), the owning lwp is
 * tracked so rumpuser_mutex_owner() can report it.
 */
struct rumpuser_mtx {
	pthread_mutex_t pthmtx;	/* underlying pthread mutex */
	struct lwp *owner;	/* holder; maintained only for kmutexes */
	int flags;		/* RUMPUSER_MTX_* flags, fixed at init */
};
57
/*
 * rwlock state bookkeeping.  The "readers" field encodes the state:
 *   -1  write-locked (writer records the owning lwp)
 *    0  unlocked
 *   >0  number of read holders
 * The per-lock spinlock serializes reader count updates; the writer
 * transitions (SETWRITE/CLRWRITE) happen while the write lock is held,
 * so they need no extra locking.
 */
#define RURW_AMWRITER(rw) (rw->writer == rumpuser_curlwp()		\
				&& rw->readers == -1)
#define RURW_HASREAD(rw)  (rw->readers > 0)

#define RURW_SETWRITE(rw)						\
do {									\
	assert(rw->readers == 0);					\
	rw->writer = rumpuser_curlwp();					\
	rw->readers = -1;						\
} while (/*CONSTCOND*/0)
#define RURW_CLRWRITE(rw)						\
do {									\
	assert(RURW_AMWRITER(rw));					\
	rw->readers = 0;						\
	rw->writer = NULL;						\
} while (/*CONSTCOND*/0)
#define RURW_INCREAD(rw)						\
do {									\
	pthread_spin_lock(&rw->spin);					\
	assert(rw->readers >= 0);					\
	++(rw)->readers;						\
	pthread_spin_unlock(&rw->spin);					\
} while (/*CONSTCOND*/0)
#define RURW_DECREAD(rw)						\
do {									\
	pthread_spin_lock(&rw->spin);					\
	assert(rw->readers > 0);					\
	--(rw)->readers;						\
	pthread_spin_unlock(&rw->spin);					\
} while (/*CONSTCOND*/0)
88
/*
 * Hypervisor rwlock: a pthread rwlock plus reader/writer bookkeeping
 * (see the RURW_* macros above) needed for the *_held() queries, which
 * pthread rwlocks do not provide natively.
 */
struct rumpuser_rw {
	pthread_rwlock_t pthrw;	/* underlying pthread rwlock */
	pthread_spinlock_t spin;/* protects "readers" updates */
	int readers;		/* -1 write-locked, 0 free, >0 readers */
	struct lwp *writer;	/* lwp holding the write lock, or NULL */
};
95
/*
 * Hypervisor condition variable.  nwaiters is maintained under the
 * interlocking mutex passed to the wait routines and is exposed via
 * rumpuser_cv_has_waiters().
 */
struct rumpuser_cv {
	pthread_cond_t pthcv;	/* underlying pthread condvar */
	int nwaiters;		/* number of threads currently waiting */
};
100
/*
 * One-time thread-subsystem init: create the TSD key used to stash
 * the current lwp pointer.  No destructor -- lwp lifecycle is managed
 * explicitly via rumpuser_curlwpop().
 */
void
rumpuser__thrinit(void)
{

	pthread_key_create(&curlwpkey, NULL);
}
107
/*
 * Create a host thread running f(arg).  If joinable, a heap-allocated
 * pthread_t cookie is returned via *ptcookie for rumpuser_thread_join()
 * (which frees it).  priority and cpuidx are accepted but unused by
 * this hypervisor.  Returns 0 or an errno value.
 */
int
rumpuser_thread_create(void *(*f)(void *), void *arg, const char *thrname,
	int joinable, int priority, int cpuidx, void **ptcookie)
{
	pthread_t ptid;
	pthread_t *ptidp;
	pthread_attr_t pattr;
	int rv;

	if ((rv = pthread_attr_init(&pattr)) != 0)
		return rv;

	if (joinable) {
		NOFAIL(ptidp = malloc(sizeof(*ptidp)));
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_JOINABLE);
	} else {
		ptidp = &ptid;
		pthread_attr_setdetachstate(&pattr, PTHREAD_CREATE_DETACHED);
	}

	rv = pthread_create(ptidp, &pattr, f, arg);
#if defined(__NetBSD__)
	/*
	 * Use *ptidp, not ptid: for joinable threads the id was stored
	 * into the heap cookie and the local "ptid" is uninitialized.
	 */
	if (rv == 0 && thrname)
		pthread_setname_np(*ptidp, thrname, NULL);
#elif defined(__linux__)
	/*
	 * The pthread_setname_np() call varies from one Linux distro to
	 * another.  Comment out the call pending autoconf support.
	 */
#if 0
	if (rv == 0 && thrname)
		pthread_setname_np(*ptidp, thrname);
#endif
#endif

	if (joinable) {
		if (rv == 0) {
			assert(ptcookie);
			*ptcookie = ptidp;
		} else {
			/* don't leak the cookie if creation failed */
			free(ptidp);
		}
	}

	pthread_attr_destroy(&pattr);

	ET(rv);
}
152
/* Terminate the calling thread; never returns (hence __dead). */
__dead void
rumpuser_thread_exit(void)
{

	pthread_exit(NULL);
}
159
/*
 * Join a thread created joinable by rumpuser_thread_create().  The
 * ptcookie is the heap-allocated pthread_t handed out at creation;
 * it is freed here on successful join.  The rump-kernel CPU context
 * is dropped around the (potentially long) blocking join.
 * Returns 0 or an errno value.
 */
int
rumpuser_thread_join(void *ptcookie)
{
	pthread_t *ptidp = ptcookie;
	int error;

	KLOCK_WRAP((error = pthread_join(*ptidp, NULL)));
	if (error == 0)
		free(ptidp);

	ET(error);
}
172
173 void
174 rumpuser_mutex_init(struct rumpuser_mtx **mtx, int flags)
175 {
176 pthread_mutexattr_t att;
177
178 NOFAIL(*mtx = malloc(sizeof(struct rumpuser_mtx)));
179
180 pthread_mutexattr_init(&att);
181 pthread_mutexattr_settype(&att, PTHREAD_MUTEX_ERRORCHECK);
182 NOFAIL_ERRNO(pthread_mutex_init(&((*mtx)->pthmtx), &att));
183 pthread_mutexattr_destroy(&att);
184
185 (*mtx)->owner = NULL;
186 assert(flags != 0);
187 (*mtx)->flags = flags;
188 }
189
190 static void
191 mtxenter(struct rumpuser_mtx *mtx)
192 {
193
194 if (!(mtx->flags & RUMPUSER_MTX_KMUTEX))
195 return;
196
197 assert(mtx->owner == NULL);
198 mtx->owner = rumpuser_curlwp();
199 }
200
201 static void
202 mtxexit(struct rumpuser_mtx *mtx)
203 {
204
205 if (!(mtx->flags & RUMPUSER_MTX_KMUTEX))
206 return;
207
208 assert(mtx->owner != NULL);
209 mtx->owner = NULL;
210 }
211
212 void
213 rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
214 {
215
216 if (mtx->flags & RUMPUSER_MTX_SPIN) {
217 rumpuser_mutex_enter_nowrap(mtx);
218 return;
219 }
220
221 assert(mtx->flags & RUMPUSER_MTX_KMUTEX);
222 if (pthread_mutex_trylock(&mtx->pthmtx) != 0)
223 KLOCK_WRAP(NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx)));
224 mtxenter(mtx);
225 }
226
/*
 * Acquire a spin mutex while keeping the rump kernel CPU context;
 * the caller guarantees the hold time is short.
 */
void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{

	assert(mtx->flags & RUMPUSER_MTX_SPIN);
	NOFAIL_ERRNO(pthread_mutex_lock(&mtx->pthmtx));
	mtxenter(mtx);
}
235
236 int
237 rumpuser_mutex_tryenter(struct rumpuser_mtx *mtx)
238 {
239 int rv;
240
241 rv = pthread_mutex_trylock(&mtx->pthmtx);
242 if (rv == 0) {
243 mtxenter(mtx);
244 }
245
246 ET(rv);
247 }
248
/*
 * Release a mutex.  Ownership bookkeeping must be cleared before the
 * pthread unlock, or a racing acquirer could see a stale owner.
 */
void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_mutex_unlock(&mtx->pthmtx));
}
256
/* Destroy an unlocked mutex and free its storage. */
void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	NOFAIL_ERRNO(pthread_mutex_destroy(&mtx->pthmtx));
	free(mtx);
}
264
265 void
266 rumpuser_mutex_owner(struct rumpuser_mtx *mtx, struct lwp **lp)
267 {
268
269 if (__predict_false(!(mtx->flags & RUMPUSER_MTX_KMUTEX))) {
270 printf("panic: rumpuser_mutex_held unsupported on non-kmtx\n");
271 abort();
272 }
273
274 *lp = mtx->owner;
275 }
276
277 void
278 rumpuser_rw_init(struct rumpuser_rw **rw)
279 {
280
281 NOFAIL(*rw = malloc(sizeof(struct rumpuser_rw)));
282 NOFAIL_ERRNO(pthread_rwlock_init(&((*rw)->pthrw), NULL));
283 NOFAIL_ERRNO(pthread_spin_init(&((*rw)->spin),PTHREAD_PROCESS_PRIVATE));
284 (*rw)->readers = 0;
285 (*rw)->writer = NULL;
286 }
287
288 void
289 rumpuser_rw_enter(struct rumpuser_rw *rw, int iswrite)
290 {
291
292 if (iswrite) {
293 if (pthread_rwlock_trywrlock(&rw->pthrw) != 0)
294 KLOCK_WRAP(NOFAIL_ERRNO(
295 pthread_rwlock_wrlock(&rw->pthrw)));
296 RURW_SETWRITE(rw);
297 } else {
298 if (pthread_rwlock_tryrdlock(&rw->pthrw) != 0)
299 KLOCK_WRAP(NOFAIL_ERRNO(
300 pthread_rwlock_rdlock(&rw->pthrw)));
301 RURW_INCREAD(rw);
302 }
303 }
304
305 int
306 rumpuser_rw_tryenter(struct rumpuser_rw *rw, int iswrite)
307 {
308 int rv;
309
310 if (iswrite) {
311 rv = pthread_rwlock_trywrlock(&rw->pthrw);
312 if (rv == 0)
313 RURW_SETWRITE(rw);
314 } else {
315 rv = pthread_rwlock_tryrdlock(&rw->pthrw);
316 if (rv == 0)
317 RURW_INCREAD(rw);
318 }
319
320 ET(rv);
321 }
322
323 void
324 rumpuser_rw_exit(struct rumpuser_rw *rw)
325 {
326
327 if (RURW_HASREAD(rw))
328 RURW_DECREAD(rw);
329 else
330 RURW_CLRWRITE(rw);
331 NOFAIL_ERRNO(pthread_rwlock_unlock(&rw->pthrw));
332 }
333
/* Destroy an unlocked rwlock (both pthread objects) and free it. */
void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	NOFAIL_ERRNO(pthread_rwlock_destroy(&rw->pthrw));
	NOFAIL_ERRNO(pthread_spin_destroy(&rw->spin));
	free(rw);
}
342
/*
 * *rv = non-zero iff rw is held by anyone (read or write).
 * NOTE(review): reads "readers" without taking the spinlock -- an
 * advisory snapshot, as is usual for lock _held() predicates.
 */
void
rumpuser_rw_held(struct rumpuser_rw *rw, int *rv)
{

	*rv = rw->readers != 0;
}
349
/* *rv = non-zero iff rw is currently read-held (readers > 0). */
void
rumpuser_rw_rdheld(struct rumpuser_rw *rw, int *rv)
{

	*rv = RURW_HASREAD(rw);
}
356
/* *rv = non-zero iff the calling lwp holds the write lock on rw. */
void
rumpuser_rw_wrheld(struct rumpuser_rw *rw, int *rv)
{

	*rv = RURW_AMWRITER(rw);
}
363
364 void
365 rumpuser_cv_init(struct rumpuser_cv **cv)
366 {
367
368 NOFAIL(*cv = malloc(sizeof(struct rumpuser_cv)));
369 NOFAIL_ERRNO(pthread_cond_init(&((*cv)->pthcv), NULL));
370 (*cv)->nwaiters = 0;
371 }
372
/* Destroy a condition variable nobody is waiting on and free it. */
void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_destroy(&cv->pthcv));
	free(cv);
}
380
/*
 * Wait on cv; caller holds mtx.  The ordering here is deliberate:
 * bump the waiter count while still holding the mutex, release the
 * rump kernel CPU context so the kernel can run while we sleep, and
 * clear mutex ownership bookkeeping before pthread_cond_wait (which
 * atomically unlocks/relocks pthmtx itself).  Everything is undone
 * in reverse order on wakeup.
 */
void
rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{
	int nlocks;

	cv->nwaiters++;
	rumpkern_unsched(&nlocks, mtx);
	mtxexit(mtx);	/* the wait releases pthmtx on our behalf */
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);	/* pthmtx is held again upon return */
	rumpkern_sched(nlocks, mtx);
	cv->nwaiters--;
}
394
/*
 * Like rumpuser_cv_wait(), but without releasing the rump kernel CPU
 * context around the sleep -- for waits where the caller must retain
 * its kernel scheduling state.
 */
void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	mtxexit(mtx);
	NOFAIL_ERRNO(pthread_cond_wait(&cv->pthcv, &mtx->pthmtx));
	mtxenter(mtx);
	cv->nwaiters--;
}
405
406 int
407 rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
408 int64_t sec, int64_t nsec)
409 {
410 struct timespec ts;
411 int rv, nlocks;
412
413 /*
414 * Get clock already here, just in case we will be put to sleep
415 * after releasing the kernel context.
416 *
417 * The condition variables should use CLOCK_MONOTONIC, but since
418 * that's not available everywhere, leave it for another day.
419 */
420 clock_gettime(CLOCK_REALTIME, &ts);
421
422 cv->nwaiters++;
423 rumpkern_unsched(&nlocks, mtx);
424 mtxexit(mtx);
425
426 ts.tv_sec += sec;
427 ts.tv_nsec += nsec;
428 if (ts.tv_nsec >= 1000*1000*1000) {
429 ts.tv_sec++;
430 ts.tv_nsec -= 1000*1000*1000;
431 }
432 rv = pthread_cond_timedwait(&cv->pthcv, &mtx->pthmtx, &ts);
433 mtxenter(mtx);
434 rumpkern_sched(nlocks, mtx);
435 cv->nwaiters--;
436
437 ET(rv);
438 }
439
/* Wake one waiter on cv. */
void
rumpuser_cv_signal(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_signal(&cv->pthcv));
}
446
/* Wake all waiters on cv. */
void
rumpuser_cv_broadcast(struct rumpuser_cv *cv)
{

	NOFAIL_ERRNO(pthread_cond_broadcast(&cv->pthcv));
}
453
/*
 * Report the current waiter count.  Meaningful only while the caller
 * holds the interlock used with the wait routines; otherwise it is
 * just an unlocked snapshot.
 */
void
rumpuser_cv_has_waiters(struct rumpuser_cv *cv, int *nwaiters)
{

	*nwaiters = cv->nwaiters;
}
460
461 /*
462 * curlwp
463 */
464
465 /*
466 * the if0'd curlwp implementation is not used by this hypervisor,
467 * but serves as test code to check that the intended usage works.
468 */
469 #if 0
/* Debug wrapper associating an lwp with its registry list entry. */
struct rumpuser_lwp {
	struct lwp *l;
	LIST_ENTRY(rumpuser_lwp) l_entries;
};
/* registry of all created lwps, protected by lwplock */
static LIST_HEAD(, rumpuser_lwp) lwps = LIST_HEAD_INITIALIZER(lwps);
static pthread_mutex_t lwplock = PTHREAD_MUTEX_INITIALIZER;
476
477 void
478 rumpuser_curlwpop(enum rumplwpop op, struct lwp *l)
479 {
480 struct rumpuser_lwp *rl, *rliter;
481
482 switch (op) {
483 case RUMPUSER_LWP_CREATE:
484 rl = malloc(sizeof(*rl));
485 rl->l = l;
486 pthread_mutex_lock(&lwplock);
487 LIST_FOREACH(rliter, &lwps, l_entries) {
488 if (rliter->l == l) {
489 fprintf(stderr, "LWP_CREATE: %p exists\n", l);
490 abort();
491 }
492 }
493 LIST_INSERT_HEAD(&lwps, rl, l_entries);
494 pthread_mutex_unlock(&lwplock);
495 break;
496 case RUMPUSER_LWP_DESTROY:
497 pthread_mutex_lock(&lwplock);
498 LIST_FOREACH(rl, &lwps, l_entries) {
499 if (rl->l == l)
500 break;
501 }
502 if (!rl) {
503 fprintf(stderr, "LWP_DESTROY: %p does not exist\n", l);
504 abort();
505 }
506 LIST_REMOVE(rl, l_entries);
507 pthread_mutex_unlock(&lwplock);
508 free(rl);
509 break;
510 case RUMPUSER_LWP_SET:
511 assert(pthread_getspecific(curlwpkey) == NULL || l == NULL);
512
513 if (l) {
514 pthread_mutex_lock(&lwplock);
515 LIST_FOREACH(rl, &lwps, l_entries) {
516 if (rl->l == l)
517 break;
518 }
519 if (!rl) {
520 fprintf(stderr,
521 "LWP_SET: %p does not exist\n", l);
522 abort();
523 }
524 pthread_mutex_unlock(&lwplock);
525 } else {
526 rl = NULL;
527 }
528
529 pthread_setspecific(curlwpkey, rl);
530 break;
531 }
532 }
533
534 struct lwp *
535 rumpuser_curlwp(void)
536 {
537 struct rumpuser_lwp *rl;
538
539 rl = pthread_getspecific(curlwpkey);
540 return rl ? rl->l : NULL;
541 }
542
543 #else
544
545 void
546 rumpuser_curlwpop(enum rumplwpop op, struct lwp *l)
547 {
548
549 switch (op) {
550 case RUMPUSER_LWP_CREATE:
551 break;
552 case RUMPUSER_LWP_DESTROY:
553 break;
554 case RUMPUSER_LWP_SET:
555 assert(pthread_getspecific(curlwpkey) == NULL || l == NULL);
556 pthread_setspecific(curlwpkey, l);
557 break;
558 }
559 }
560
/* Return the current lwp stored by RUMPUSER_LWP_SET, or NULL. */
struct lwp *
rumpuser_curlwp(void)
{

	return pthread_getspecific(curlwpkey);
}
567 #endif
568