/*	$NetBSD: threads.c,v 1.16 2012/11/04 14:40:18 pooka Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.16 2012/11/04 14:40:18 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * Handoff descriptor between kthread_create() and the new host thread:
 * carries the thread entry point, its argument, and the kernel lwp
 * context until threadbouncer() has consumed (and freed) it.
 */
struct kthdesc {
	void (*f)(void *);	/* thread entry point */
	void *arg;		/* argument passed to f */
	struct lwp *mylwp;	/* lwp pre-allocated for the new thread */
};
50
/*
 * Host-side entry point for every kernel thread.  Establishes the rump
 * kernel context for the new thread (curlwp and a rump CPU) before
 * calling the actual thread function.  Does not return: the thread
 * function is expected to terminate via kthread_exit().
 */
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	struct lwp *l = k->mylwp;
	void (*f)(void *);
	void *thrarg;

	/* Copy out of the descriptor before it is freed below. */
	f = k->f;
	thrarg = k->arg;

	/* schedule ourselves */
	rumpuser_set_curlwp(l);
	rump_schedule();

	/* free dance struct */
	free(k, M_TEMP);

	/*
	 * Non-MPSAFE threads run under the biglock; the matching
	 * unlock happens in kthread_exit().
	 */
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	/* f() must call kthread_exit(), so we should never get here. */
	panic("unreachable, should kthread_exit()");
}
76
/*
 * Threads that are not essential for operation and may be faked away
 * when the rump kernel runs without thread support (rump_threads == 0).
 * t_ncmp selects prefix match (strncmp) instead of an exact name match,
 * for thread names created with a varying suffix.
 */
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};
88
89 int
90 kthread_create(pri_t pri, int flags, struct cpu_info *ci,
91 void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
92 {
93 char thrstore[MAXCOMLEN];
94 const char *thrname = NULL;
95 va_list ap;
96 struct kthdesc *k;
97 struct lwp *l;
98 int rv;
99
100 thrstore[0] = '\0';
101 if (fmt) {
102 va_start(ap, fmt);
103 vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
104 va_end(ap);
105 thrname = thrstore;
106 }
107
108 /*
109 * We don't want a module unload thread.
110 * (XXX: yes, this is a kludge too, and the kernel should
111 * have a more flexible method for configuring which threads
112 * we want).
113 */
114 if (strcmp(thrstore, "modunload") == 0) {
115 return 0;
116 }
117
118 if (!rump_threads) {
119 bool matched;
120 int i;
121
122 /* do we want to fake it? */
123 for (i = 0; i < __arraycount(nothreads); i++) {
124 if (nothreads[i].t_ncmp) {
125 matched = strncmp(thrstore, nothreads[i].t_name,
126 strlen(nothreads[i].t_name)) == 0;
127 } else {
128 matched = strcmp(thrstore,
129 nothreads[i].t_name) == 0;
130 }
131 if (matched) {
132 aprint_error("rump kernel threads not enabled, "
133 "%s not functional\n", nothreads[i].t_name);
134 return 0;
135 }
136 }
137 panic("threads not available");
138 }
139 KASSERT(fmt != NULL);
140
141 k = malloc(sizeof(*k), M_TEMP, M_WAITOK);
142 k->f = func;
143 k->arg = arg;
144 k->mylwp = l = rump__lwproc_alloclwp(&proc0);
145 l->l_flag |= LW_SYSTEM;
146 if (flags & KTHREAD_MPSAFE)
147 l->l_pflag |= LP_MPSAFE;
148 if (flags & KTHREAD_INTR)
149 l->l_pflag |= LP_INTR;
150 if (ci) {
151 l->l_pflag |= LP_BOUND;
152 l->l_target_cpu = ci;
153 }
154 if (thrname) {
155 l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
156 strlcpy(l->l_name, thrname, MAXCOMLEN);
157 }
158
159 rv = rumpuser_thread_create(threadbouncer, k, thrname,
160 (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN, &l->l_ctxlink);
161 if (rv)
162 return rv;
163
164 if (newlp) {
165 *newlp = l;
166 } else {
167 KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
168 }
169
170 return 0;
171 }
172
/*
 * Terminate the calling kernel thread.  ecode is ignored here.
 * Releases the lwp and unschedules from the rump CPU before exiting
 * the host thread; does not return.
 */
void
kthread_exit(int ecode)
{

	/* Drop the biglock taken in threadbouncer() for non-MPSAFE threads. */
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}
184
185 int
186 kthread_join(struct lwp *l)
187 {
188 int rv;
189
190 KASSERT(l->l_ctxlink != NULL);
191 rv = rumpuser_thread_join(l->l_ctxlink);
192 membar_consumer();
193
194 return rv;
195 }
196