/*	$NetBSD: rumpfiber.c,v 1.9 2014/12/29 21:50:09 justin Exp $	*/

/*
 * Copyright (c) 2007-2013 Antti Kantee. All Rights Reserved.
 * Copyright (c) 2014 Justin Cormack. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Based partly on code from Xen Minios with the following license */

/*
 ****************************************************************************
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: sched.c
 *      Author: Grzegorz Milos
 *     Changes: Robert Kaiser
 *
 *        Date: Aug 2005
 *
 * Environment: Xen Minimal OS
 * Description: simple scheduler for Mini-Os
 *
 * The scheduler is non-preemptive (cooperative) and schedules threads
 * according to a round-robin algorithm.
 *
 ****************************************************************************
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "rumpuser_port.h"

#if !defined(lint)
__RCSID("$NetBSD: rumpfiber.c,v 1.9 2014/12/29 21:50:09 justin Exp $");
#endif /* !lint */

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <rump/rumpuser.h>

#include "rumpuser_int.h"
#include "rumpfiber.h"

static void init_sched(void);
static void join_thread(struct thread *);
static void switch_threads(struct thread *prev, struct thread *next);
static struct thread *get_current(void);
static int64_t now(void);
static void msleep(uint64_t millisecs);
static void abssleep(uint64_t millisecs);

TAILQ_HEAD(thread_list, thread);

static struct thread_list exited_threads = TAILQ_HEAD_INITIALIZER(exited_threads);
static struct thread_list thread_list = TAILQ_HEAD_INITIALIZER(thread_list);
static struct thread *current_thread = NULL;

static void (*scheduler_hook)(void *, void *);

static void printk(const char *s);

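/*
 * Emit a diagnostic message with write(2) directly to stderr, bypassing
 * stdio; usable from scheduler paths where stdio state is uncertain.
 */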
static void
printk(const char *msg)
{
	int ret __attribute__((unused));

	ret = write(2, msg, strlen(msg));
}

static struct thread *
get_current(void)
{

	return current_thread;
}

static int64_t
now(void)
{
	struct timespec ts;
	int rv;

	rv = clock_gettime(CLOCK_MONOTONIC, &ts);
	assert(rv == 0);
	return (ts.tv_sec * 1000LL) + (ts.tv_nsec / 1000000LL);
}

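/*
 * The round-robin scheduler.  Scan the thread list: wake sleepers whose
 * deadline has passed (marking them THREAD_TIMEDOUT) and pick the first
 * runnable thread, rotating it to the tail of the list.  If nothing is
 * runnable, sleep the host until the nearest wakeup (at most one second)
 * and rescan.  After switching back, reap threads that exited in the
 * meantime, unmapping their stacks unless they were caller-supplied.
 */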
void
schedule(void)
{
	struct thread *prev, *next, *thread, *tmp;
	int64_t tm, wakeup;
	struct timespec sl;

	prev = get_current();

	do {
		tm = now();
		wakeup = tm + 1000;	/* wake up in 1s max */
		next = NULL;
		TAILQ_FOREACH_SAFE(thread, &thread_list, thread_list, tmp) {
			if (!is_runnable(thread) && thread->wakeup_time >= 0) {
				if (thread->wakeup_time <= tm) {
					thread->flags |= THREAD_TIMEDOUT;
					wake(thread);
				} else if (thread->wakeup_time < wakeup)
					wakeup = thread->wakeup_time;
			}
			if (is_runnable(thread)) {
				next = thread;
				/* Put this thread on the end of the list */
				TAILQ_REMOVE(&thread_list, thread, thread_list);
				TAILQ_INSERT_TAIL(&thread_list, thread, thread_list);
				break;
			}
		}
		if (next)
			break;
		sl.tv_sec = (wakeup - tm) / 1000;
		sl.tv_nsec = ((wakeup - tm) - 1000 * sl.tv_sec) * 1000000;
#ifdef HAVE_CLOCK_NANOSLEEP
		clock_nanosleep(CLOCK_MONOTONIC, 0, &sl, NULL);
#else
		nanosleep(&sl, NULL);
#endif
	} while (1);

	if (prev != next)
		switch_threads(prev, next);

	TAILQ_FOREACH_SAFE(thread, &exited_threads, thread_list, tmp) {
		if (thread != prev) {
			TAILQ_REMOVE(&exited_threads, thread, thread_list);
			if ((thread->flags & THREAD_EXTSTACK) == 0)
				munmap(thread->ctx.uc_stack.ss_sp, STACKSIZE);
			free(thread->name);
			free(thread);
		}
	}
}

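/*
 * Initialize a ucontext for a new thread: point it at the given stack
 * and arrange for f(data) to run when the context is first switched to.
 */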
static void
create_ctx(ucontext_t *ctx, void *stack, size_t stack_size,
	void (*f)(void *), void *data)
{

	getcontext(ctx);
	ctx->uc_stack.ss_sp = stack;
	ctx->uc_stack.ss_size = stack_size;
	ctx->uc_stack.ss_flags = 0;
	ctx->uc_link = NULL;	/* TODO may link to main thread */
	/* may have to do bounce function to call, if args to makecontext are ints */
	makecontext(ctx, (void (*)(void))f, 1, data);
}

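/*
 * Create a new cooperative thread and append it to the run queue.  If no
 * stack is given, one is mmap()ed and later reclaimed by schedule(); a
 * caller-supplied stack is flagged THREAD_EXTSTACK and left alone.  An
 * illustrative (hypothetical) use, assuming a worker function that ends
 * by calling exit_thread():
 *
 *	static void worker(void *arg) { ... ; exit_thread(); }
 *	struct thread *t = create_thread("worker", NULL, worker, arg, NULL, 0);
 */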
/* TODO see notes in rumpuser_thread_create, have flags here */
struct thread *
create_thread(const char *name, void *cookie, void (*f)(void *), void *data,
	void *stack, size_t stack_size)
{
	struct thread *thread = calloc(1, sizeof(struct thread));

	if (!thread) {
		return NULL;
	}

	if (!stack) {
		assert(stack_size == 0);
		stack = mmap(NULL, STACKSIZE, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANON, -1, 0);
		if (stack == MAP_FAILED) {
			free(thread);
			return NULL;
		}
		stack_size = STACKSIZE;
	} else {
		thread->flags = THREAD_EXTSTACK;
	}
	create_ctx(&thread->ctx, stack, stack_size, f, data);

	thread->name = strdup(name);
	thread->cookie = cookie;

	/* Not runnable, not exited, not sleeping */
	thread->wakeup_time = -1;
	thread->lwp = NULL;
	set_runnable(thread);
	TAILQ_INSERT_TAIL(&thread_list, thread, thread_list);

	return thread;
}

static void
switch_threads(struct thread *prev, struct thread *next)
{
	int ret;

	current_thread = next;
	if (scheduler_hook)
		scheduler_hook(prev->cookie, next->cookie);
	ret = swapcontext(&prev->ctx, &next->ctx);
	if (ret < 0) {
		printk("swapcontext failed\n");
		abort();
	}
}

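/*
 * Join handshake: an exiting THREAD_MUSTJOIN thread sets THREAD_JOINED and
 * blocks until a joiner on joinwq has seen it; the joiner then clears
 * THREAD_MUSTJOIN and wakes the exiting thread so schedule() can reap it.
 */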
struct join_waiter {
	struct thread *jw_thread;
	struct thread *jw_wanted;
	TAILQ_ENTRY(join_waiter) jw_entries;
};
static TAILQ_HEAD(, join_waiter) joinwq = TAILQ_HEAD_INITIALIZER(joinwq);

void
exit_thread(void)
{
	struct thread *thread = get_current();
	struct join_waiter *jw_iter;

	/* if joinable, gate until we are allowed to exit */
	while (thread->flags & THREAD_MUSTJOIN) {
		thread->flags |= THREAD_JOINED;

		/* see if the joiner is already there */
		TAILQ_FOREACH(jw_iter, &joinwq, jw_entries) {
			if (jw_iter->jw_wanted == thread) {
				wake(jw_iter->jw_thread);
				break;
			}
		}
		block(thread);
		schedule();
	}

	/* Remove from the thread list */
	TAILQ_REMOVE(&thread_list, thread, thread_list);
	clear_runnable(thread);
	/* Put onto exited list */
	TAILQ_INSERT_HEAD(&exited_threads, thread, thread_list);

	/* Schedule will free the resources */
	while (1) {
		schedule();
		printk("schedule() returned! Trying again\n");
	}
}

static void
join_thread(struct thread *joinable)
{
	struct join_waiter jw;
	struct thread *thread = get_current();

	assert(joinable->flags & THREAD_MUSTJOIN);

	/* wait for exiting thread to hit thread_exit() */
	while (! (joinable->flags & THREAD_JOINED)) {

		jw.jw_thread = thread;
		jw.jw_wanted = joinable;
		TAILQ_INSERT_TAIL(&joinwq, &jw, jw_entries);
		block(thread);
		schedule();
		TAILQ_REMOVE(&joinwq, &jw, jw_entries);
	}

	/* signal exiting thread that we have seen it and it may now exit */
	assert(joinable->flags & THREAD_JOINED);
	joinable->flags &= ~THREAD_MUSTJOIN;

	wake(joinable);
}

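/* sleep for a duration relative to now, on the monotonic clock */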
static void msleep(uint64_t millisecs)
{
	struct thread *thread = get_current();

	thread->wakeup_time = now() + millisecs;
	clear_runnable(thread);
	schedule();
}

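/* sleep until an absolute time on the monotonic clock */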
static void abssleep(uint64_t millisecs)
{
	struct thread *thread = get_current();

	thread->wakeup_time = millisecs;
	clear_runnable(thread);
	schedule();
}

/* like abssleep, except against realtime clock instead of monotonic clock */
int abssleep_real(uint64_t millisecs)
{
	struct thread *thread = get_current();
	struct timespec ts;
	uint64_t real_now;
	int rv;

	clock_gettime(CLOCK_REALTIME, &ts);
	real_now = 1000*ts.tv_sec + ts.tv_nsec/(1000*1000);
	thread->wakeup_time = now() + (millisecs - real_now);

	clear_runnable(thread);
	schedule();

	rv = !!(thread->flags & THREAD_TIMEDOUT);
	thread->flags &= ~THREAD_TIMEDOUT;
	return rv;
}

void wake(struct thread *thread)
{

	thread->wakeup_time = -1;
	set_runnable(thread);
}

void block(struct thread *thread)
{

	thread->wakeup_time = -1;
	clear_runnable(thread);
}

int is_runnable(struct thread *thread)
{

	return thread->flags & RUNNABLE_FLAG;
}

void set_runnable(struct thread *thread)
{

	thread->flags |= RUNNABLE_FLAG;
}

void clear_runnable(struct thread *thread)
{

	thread->flags &= ~RUNNABLE_FLAG;
}

static void
init_sched(void)
{
	struct thread *thread = calloc(1, sizeof(struct thread));

	thread->name = strdup("init");
	thread->flags = 0;
	thread->wakeup_time = -1;
	thread->lwp = NULL;
	set_runnable(thread);
	TAILQ_INSERT_TAIL(&thread_list, thread, thread_list);
	current_thread = thread;
}

void
set_sched_hook(void (*f)(void *, void *))
{

	scheduler_hook = f;
}

struct thread *
init_mainthread(void *cookie)
{

	current_thread->cookie = cookie;
	return current_thread;
}

/* rump functions below */

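/*
 * Hypercall table handed in by the rump kernel at rumpuser_init() time;
 * the rumpkern_sched()/rumpkern_unsched() helpers dispatch through it.
 */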
struct rumpuser_hyperup rumpuser__hyp;

int
rumpuser_init(int version, const struct rumpuser_hyperup *hyp)
{
	int rv;

	if (version != RUMPUSER_VERSION) {
		printk("rumpuser version mismatch\n");
		abort();
	}

	rv = rumpuser__random_init();
	if (rv != 0) {
		ET(rv);
	}

	rumpuser__hyp = *hyp;

	init_sched();

	return 0;
}

int
rumpuser_clock_gettime(int enum_rumpclock, int64_t *sec, long *nsec)
{
	enum rumpclock rclk = enum_rumpclock;
	struct timespec ts;
	clockid_t clk;
	int rv;

	switch (rclk) {
	case RUMPUSER_CLOCK_RELWALL:
		clk = CLOCK_REALTIME;
		break;
	case RUMPUSER_CLOCK_ABSMONO:
		clk = CLOCK_MONOTONIC;
		break;
	default:
		abort();
	}

	if (clock_gettime(clk, &ts) == -1) {
		rv = errno;
	} else {
		*sec = ts.tv_sec;
		*nsec = ts.tv_nsec;
		rv = 0;
	}

	ET(rv);
}

int
rumpuser_clock_sleep(int enum_rumpclock, int64_t sec, long nsec)
{
	enum rumpclock rclk = enum_rumpclock;
	uint32_t msec;
	int nlocks;

	rumpkern_unsched(&nlocks, NULL);
	switch (rclk) {
	case RUMPUSER_CLOCK_RELWALL:
		msec = sec * 1000 + nsec / (1000*1000UL);
		msleep(msec);
		break;
	case RUMPUSER_CLOCK_ABSMONO:
		msec = sec * 1000 + nsec / (1000*1000UL);
		abssleep(msec);
		break;
	}
	rumpkern_sched(nlocks, NULL);

	return 0;
}

int
rumpuser_getparam(const char *name, void *buf, size_t blen)
{
	int rv;
	const char *ncpu = "1";

	if (strcmp(name, RUMPUSER_PARAM_NCPU) == 0) {
		strncpy(buf, ncpu, blen);
		rv = 0;
	} else if (strcmp(name, RUMPUSER_PARAM_HOSTNAME) == 0) {
		char tmp[MAXHOSTNAMELEN];

		if (gethostname(tmp, sizeof(tmp)) == -1) {
			snprintf(buf, blen, "rump-%05d", (int)getpid());
		} else {
			snprintf(buf, blen, "rump-%05d.%s",
			    (int)getpid(), tmp);
		}
		rv = 0;
	} else if (*name == '_') {
		rv = EINVAL;
	} else {
		if (getenv_r(name, buf, blen) == -1)
			rv = errno;
		else
			rv = 0;
	}

	ET(rv);
}

void
rumpuser_putchar(int c)
{

	putchar(c);
}

__dead void
rumpuser_exit(int rv)
{

	if (rv == RUMPUSER_PANIC)
		abort();
	else
		exit(rv);
}

void
rumpuser_seterrno(int error)
{

	errno = error;
}

/*
 * This is meant for safe debugging prints from the kernel.
 */
void
rumpuser_dprintf(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	vfprintf(stderr, format, ap);
	va_end(ap);
}

int
rumpuser_kill(int64_t pid, int rumpsig)
{
	int sig;

	sig = rumpuser__sig_rump2host(rumpsig);
	if (sig > 0)
		raise(sig);
	return 0;
}

/* thread functions */

TAILQ_HEAD(waithead, waiter);
struct waiter {
	struct thread *who;
	TAILQ_ENTRY(waiter) entries;
	int onlist;
};

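/*
 * Park the current thread on a wait queue, optionally with a timeout in
 * milliseconds (0 = wait forever).  wakeup_one()/wakeup_all() dequeue
 * waiters and clear onlist; if we come back still on the list, the
 * scheduler woke us for a timeout and we return ETIMEDOUT.
 */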
static int
wait(struct waithead *wh, uint64_t msec)
{
	struct waiter w;

	w.who = get_current();
	TAILQ_INSERT_TAIL(wh, &w, entries);
	w.onlist = 1;
	block(w.who);
	if (msec)
		w.who->wakeup_time = now() + msec;
	schedule();

	/* woken up by timeout? */
	if (w.onlist)
		TAILQ_REMOVE(wh, &w, entries);

	return w.onlist ? ETIMEDOUT : 0;
}

static void
wakeup_one(struct waithead *wh)
{
	struct waiter *w;

	if ((w = TAILQ_FIRST(wh)) != NULL) {
		TAILQ_REMOVE(wh, w, entries);
		w->onlist = 0;
		wake(w->who);
	}
}

static void
wakeup_all(struct waithead *wh)
{
	struct waiter *w;

	while ((w = TAILQ_FIRST(wh)) != NULL) {
		TAILQ_REMOVE(wh, w, entries);
		w->onlist = 0;
		wake(w->who);
	}
}

int
rumpuser_thread_create(void *(*f)(void *), void *arg, const char *thrname,
	int joinable, int pri, int cpuidx, void **tptr)
{
	struct thread *thr;

	thr = create_thread(thrname, NULL, (void (*)(void *))f, arg, NULL, 0);
	/*
	 * XXX: should be supplied as a flag to create_thread() so as to
	 * _ensure_ it's set before the thread runs (and could exit).
	 * now we're trusting unclear semantics of create_thread()
	 */
	if (thr && joinable)
		thr->flags |= THREAD_MUSTJOIN;

	if (!thr)
		return EINVAL;

	*tptr = thr;
	return 0;
}

void
rumpuser_thread_exit(void)
{

	exit_thread();
}

int
rumpuser_thread_join(void *p)
{

	join_thread(p);
	return 0;
}

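/*
 * Mutexes.  v counts recursive acquisitions by the owning lwp o;
 * tryenter fails with EBUSY only if a different lwp holds the lock.
 * Contended sleepers queue on waiters and are woken one at a time.
 */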
struct rumpuser_mtx {
	struct waithead waiters;
	int v;
	int flags;
	struct lwp *o;
};

void
rumpuser_mutex_init(struct rumpuser_mtx **mtxp, int flags)
{
	struct rumpuser_mtx *mtx;

	mtx = malloc(sizeof(*mtx));
	memset(mtx, 0, sizeof(*mtx));
	mtx->flags = flags;
	TAILQ_INIT(&mtx->waiters);
	*mtxp = mtx;
}

void
rumpuser_mutex_enter(struct rumpuser_mtx *mtx)
{
	int nlocks;

	if (rumpuser_mutex_tryenter(mtx) != 0) {
		rumpkern_unsched(&nlocks, NULL);
		while (rumpuser_mutex_tryenter(mtx) != 0)
			wait(&mtx->waiters, 0);
		rumpkern_sched(nlocks, NULL);
	}
}

void
rumpuser_mutex_enter_nowrap(struct rumpuser_mtx *mtx)
{
	int rv;

	rv = rumpuser_mutex_tryenter(mtx);
	/* one VCPU supported, no preemption => must succeed */
	if (rv != 0) {
		printk("no voi ei\n");
	}
}

int
rumpuser_mutex_tryenter(struct rumpuser_mtx *mtx)
{
	struct lwp *l = get_current()->lwp;

	if (mtx->v && mtx->o != l)
		return EBUSY;

	mtx->v++;
	mtx->o = l;

	return 0;
}

void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	assert(mtx->v > 0);
	if (--mtx->v == 0) {
		mtx->o = NULL;
		wakeup_one(&mtx->waiters);
	}
}

void
rumpuser_mutex_destroy(struct rumpuser_mtx *mtx)
{

	assert(TAILQ_EMPTY(&mtx->waiters) && mtx->o == NULL);
	free(mtx);
}

void
rumpuser_mutex_owner(struct rumpuser_mtx *mtx, struct lwp **lp)
{

	*lp = mtx->o;
}

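/*
 * Read/write locks.  o non-NULL means write-locked by that lwp; v > 0
 * counts readers; v == -1 marks a write lock downgraded in place, which
 * rumpuser_rw_tryupgrade() can later reclaim.  Writers queue on wwait
 * and readers on rwait, with waiting writers blocking new readers.
 */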
struct rumpuser_rw {
	struct waithead rwait;
	struct waithead wwait;
	int v;
	struct lwp *o;
};

void
rumpuser_rw_init(struct rumpuser_rw **rwp)
{
	struct rumpuser_rw *rw;

	rw = malloc(sizeof(*rw));
	memset(rw, 0, sizeof(*rw));
	TAILQ_INIT(&rw->rwait);
	TAILQ_INIT(&rw->wwait);

	*rwp = rw;
}

void
rumpuser_rw_enter(int enum_rumprwlock, struct rumpuser_rw *rw)
{
	enum rumprwlock lk = enum_rumprwlock;
	struct waithead *w = NULL;
	int nlocks;

	switch (lk) {
	case RUMPUSER_RW_WRITER:
		w = &rw->wwait;
		break;
	case RUMPUSER_RW_READER:
		w = &rw->rwait;
		break;
	}

	if (rumpuser_rw_tryenter(enum_rumprwlock, rw) != 0) {
		rumpkern_unsched(&nlocks, NULL);
		while (rumpuser_rw_tryenter(enum_rumprwlock, rw) != 0)
			wait(w, 0);
		rumpkern_sched(nlocks, NULL);
	}
}

int
rumpuser_rw_tryenter(int enum_rumprwlock, struct rumpuser_rw *rw)
{
	enum rumprwlock lk = enum_rumprwlock;
	int rv;

	switch (lk) {
	case RUMPUSER_RW_WRITER:
		if (rw->o == NULL) {
			rw->o = rumpuser_curlwp();
			rv = 0;
		} else {
			rv = EBUSY;
		}
		break;
	case RUMPUSER_RW_READER:
		if (rw->o == NULL && TAILQ_EMPTY(&rw->wwait)) {
			rw->v++;
			rv = 0;
		} else {
			rv = EBUSY;
		}
		break;
	default:
		rv = EINVAL;
	}

	return rv;
}

void
rumpuser_rw_exit(struct rumpuser_rw *rw)
{

	if (rw->o) {
		rw->o = NULL;
	} else {
		rw->v--;
	}

	/* standard procedure, don't let readers starve out writers */
	if (!TAILQ_EMPTY(&rw->wwait)) {
		if (rw->o == NULL)
			wakeup_one(&rw->wwait);
	} else if (!TAILQ_EMPTY(&rw->rwait) && rw->o == NULL) {
		wakeup_all(&rw->rwait);
	}
}

void
rumpuser_rw_destroy(struct rumpuser_rw *rw)
{

	free(rw);
}

void
rumpuser_rw_held(int enum_rumprwlock, struct rumpuser_rw *rw, int *rvp)
{
	enum rumprwlock lk = enum_rumprwlock;

	switch (lk) {
	case RUMPUSER_RW_WRITER:
		*rvp = rw->o == rumpuser_curlwp();
		break;
	case RUMPUSER_RW_READER:
		*rvp = rw->v > 0;
		break;
	}
}

void
rumpuser_rw_downgrade(struct rumpuser_rw *rw)
{

	assert(rw->o == rumpuser_curlwp());
	rw->v = -1;
}

int
rumpuser_rw_tryupgrade(struct rumpuser_rw *rw)
{

	if (rw->v == -1) {
		rw->v = 1;
		rw->o = rumpuser_curlwp();
		return 0;
	}

	return EBUSY;
}

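/*
 * Condition variables.  Waiters park on the waithead; nwaiters backs
 * rumpuser_cv_has_waiters().  cv_unsched()/cv_resched() drop and retake
 * the interlock around the wait, ordering it against the rump-kernel
 * scheduler as rumpuser(3) requires for spin vs. sleep mutexes.
 */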
struct rumpuser_cv {
	struct waithead waiters;
	int nwaiters;
};

void
rumpuser_cv_init(struct rumpuser_cv **cvp)
{
	struct rumpuser_cv *cv;

	cv = malloc(sizeof(*cv));
	memset(cv, 0, sizeof(*cv));
	TAILQ_INIT(&cv->waiters);
	*cvp = cv;
}

void
rumpuser_cv_destroy(struct rumpuser_cv *cv)
{

	assert(cv->nwaiters == 0);
	free(cv);
}

static void
cv_unsched(struct rumpuser_mtx *mtx, int *nlocks)
{

	rumpkern_unsched(nlocks, mtx);
	rumpuser_mutex_exit(mtx);
}

static void
cv_resched(struct rumpuser_mtx *mtx, int nlocks)
{

	/* see rumpuser(3) */
	if ((mtx->flags & (RUMPUSER_MTX_KMUTEX | RUMPUSER_MTX_SPIN)) ==
	    (RUMPUSER_MTX_KMUTEX | RUMPUSER_MTX_SPIN)) {
		rumpkern_sched(nlocks, mtx);
		rumpuser_mutex_enter_nowrap(mtx);
	} else {
		rumpuser_mutex_enter_nowrap(mtx);
		rumpkern_sched(nlocks, mtx);
	}
}

void
rumpuser_cv_wait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{
	int nlocks;

	cv->nwaiters++;
	cv_unsched(mtx, &nlocks);
	wait(&cv->waiters, 0);
	cv_resched(mtx, nlocks);
	cv->nwaiters--;
}

void
rumpuser_cv_wait_nowrap(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx)
{

	cv->nwaiters++;
	rumpuser_mutex_exit(mtx);
	wait(&cv->waiters, 0);
	rumpuser_mutex_enter_nowrap(mtx);
	cv->nwaiters--;
}

int
rumpuser_cv_timedwait(struct rumpuser_cv *cv, struct rumpuser_mtx *mtx,
	int64_t sec, int64_t nsec)
{
	int nlocks;
	int rv;

	cv->nwaiters++;
	cv_unsched(mtx, &nlocks);
	rv = wait(&cv->waiters, sec * 1000 + nsec / (1000*1000));
	cv_resched(mtx, nlocks);
	cv->nwaiters--;

	return rv;
}

void
rumpuser_cv_signal(struct rumpuser_cv *cv)
{

	wakeup_one(&cv->waiters);
}

void
rumpuser_cv_broadcast(struct rumpuser_cv *cv)
{

	wakeup_all(&cv->waiters);
}

void
rumpuser_cv_has_waiters(struct rumpuser_cv *cv, int *rvp)
{

	*rvp = cv->nwaiters != 0;
}

/*
 * curlwp
 */

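/*
 * Bind or unbind the rump-kernel lwp of the current fiber.  CREATE and
 * DESTROY are no-ops here: the lwp exists only as a pointer stored in
 * the thread at SET time and cleared again at CLEAR time.
 */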
void
rumpuser_curlwpop(int enum_rumplwpop, struct lwp *l)
{
	struct thread *thread;
	enum rumplwpop op = enum_rumplwpop;

	switch (op) {
	case RUMPUSER_LWP_CREATE:
	case RUMPUSER_LWP_DESTROY:
		break;
	case RUMPUSER_LWP_SET:
		thread = get_current();
		thread->lwp = l;
		break;
	case RUMPUSER_LWP_CLEAR:
		thread = get_current();
		assert(thread->lwp == l);
		thread->lwp = NULL;
		break;
	}
}

struct lwp *
rumpuser_curlwp(void)
{

	return get_current()->lwp;
}