/*	$NetBSD: threads.c,v 1.15.12.4 2017/12/03 11:39:16 jdolecek Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.15.12.4 2017/12/03 11:39:16 jdolecek Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/queue.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

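/*
 * Descriptor handed to the bouncer routines below: it carries the thread's
 * entry point, its argument and the pre-allocated lwp across the rumpuser
 * thread creation hypercall.  Threads made via lwp_create() additionally
 * use the runnable flag and the queue linkage.
 */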
struct thrdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *newlwp;
	int runnable;

	TAILQ_ENTRY(thrdesc) entries;
};

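/*
 * thrmtx/thrcv protect and signal two things: the global "go" switch
 * flipped by rump_thread_allow(NULL) once all CPUs have attached, and
 * the per-thread runnable state of descriptors queued on newthr by
 * lwp_create().
 */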
static bool threads_are_go;
static struct rumpuser_mtx *thrmtx;
static struct rumpuser_cv *thrcv;
static TAILQ_HEAD(, thrdesc) newthr;

static void *
threadbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;

	f = td->f;
	thrarg = td->arg;

	/* don't allow threads to run before all CPUs have fully attached */
	if (!threads_are_go) {
		rumpuser_mutex_enter_nowrap(thrmtx);
		while (!threads_are_go) {
			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
		}
		rumpuser_mutex_exit(thrmtx);
	}

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();

	/* free dance struct */
	kmem_intr_free(td, sizeof(*td));

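	/* threads not marked MPSAFE run under the big kernel lock */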
	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

void
rump_thread_init(void)
{

	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&thrcv);
	TAILQ_INIT(&newthr);
}

void
rump_thread_allow(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter(thrmtx);
	if (l == NULL) {
		threads_are_go = true;
	} else {
		TAILQ_FOREACH(td, &newthr, entries) {
			if (td->newlwp == l) {
				td->runnable = 1;
				break;
			}
		}
	}
	rumpuser_cv_broadcast(thrcv);
	rumpuser_mutex_exit(thrmtx);
}

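/*
 * Thread names for which kthread_create() prints a warning and pretends
 * success when rump_threads is disabled.  t_ncmp selects a prefix match
 * instead of an exact name comparison.
 */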
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore, nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not enabled, "
				    "%s not functional\n", nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	/*
	 * Allocate with intr-safe allocator, given that we may be
	 * creating interrupt threads.
	 */
	td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->newlwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, td, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
	if (rv)
		return rv; /* XXX */

	if (newlp) {
		*newlp = l;
	} else {
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
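	/* make the joined thread's stores visible to us */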
	membar_consumer();

	return rv;
}

/*
 * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
 *
 * Sounds strange and out-of-place?  Yup yup yup.  The original motivation
 * for this was aio.  This is a very infrequent code path in rump kernels.
 * XXX: threads created with lwp_create() are eternal for local clients.
 * However, they are correctly reaped for remote clients with process exit.
 */
static void *
lwpbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;
	int run;

	f = td->f;
	thrarg = td->arg;

	/* do not run until we've been enqueued */
	rumpuser_mutex_enter_nowrap(thrmtx);
	while ((run = td->runnable) == 0) {
		rumpuser_cv_wait_nowrap(thrcv, thrmtx);
	}
	rumpuser_mutex_exit(thrmtx);

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();
	kmem_free(td, sizeof(*td));

	/* should we just die instead? */
	if (run == -1) {
		rump_lwproc_releaselwp();
		lwp_userret(l);
		panic("lwpbouncer reached unreachable");
	}

	/* run, and don't come back! */
	f(thrarg);
	panic("lwp return from worker not supported");
}

int
lwp_create(struct lwp *l1, struct proc *p2, vaddr_t uaddr, int flags,
	void *stack, size_t stacksize, void (*func)(void *), void *arg,
	struct lwp **newlwpp, int sclass, const sigset_t *sigmask,
	const stack_t *sigstk)
{
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	if (flags)
		panic("lwp_create: flags not supported by this implementation");
	td = kmem_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->runnable = 0;
	td->newlwp = l = rump__lwproc_alloclwp(p2);

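	/*
	 * Queue the descriptor: lwpbouncer blocks until rump_thread_allow()
	 * marks it runnable or lwp_exit() tells it to die.
	 */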
	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_INSERT_TAIL(&newthr, td, entries);
	rumpuser_mutex_exit(thrmtx);

	rv = rumpuser_thread_create(lwpbouncer, td, p2->p_comm, 0,
	    PRI_USER, -1, NULL);
	if (rv)
		panic("rumpuser_thread_create failed"); /* XXX */

	*newlwpp = l;
	return 0;
}

void
lwp_exit(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_FOREACH(td, &newthr, entries) {
		if (td->newlwp == l) {
			td->runnable = -1;
			break;
		}
	}
	rumpuser_mutex_exit(thrmtx);

	if (td == NULL)
		panic("lwp_exit: could not find %p\n", l);
}

void
lwp_userret(struct lwp *l)
{

	if ((l->l_flag & LW_RUMP_QEXIT) == 0)
		return;

	/* ok, so we should die */
	rump_unschedule();
	rumpuser_thread_exit();
}