/*	$NetBSD: threads.c,v 1.22.2.1 2014/08/10 06:56:51 tls Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.22.2.1 2014/08/10 06:56:51 tls Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/queue.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct thrdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *newlwp;
	int runnable;

	TAILQ_ENTRY(thrdesc) entries;
};

static bool threads_are_go;
static struct rumpuser_mtx *thrmtx;
static struct rumpuser_cv *thrcv;
static TAILQ_HEAD(, thrdesc) newthr;

static void *
threadbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;

	f = td->f;
	thrarg = td->arg;

	/* don't allow threads to run before all CPUs have fully attached */
	if (!threads_are_go) {
		rumpuser_mutex_enter_nowrap(thrmtx);
		while (!threads_are_go) {
			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
		}
		rumpuser_mutex_exit(thrmtx);
	}

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();

	/* free dance struct */
	kmem_intr_free(td, sizeof(*td));

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

void
rump_thread_init(void)
{

	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&thrcv);
	TAILQ_INIT(&newthr);
}

void
rump_thread_allow(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter(thrmtx);
	if (l == NULL) {
		threads_are_go = true;
	} else {
		TAILQ_FOREACH(td, &newthr, entries) {
			if (td->newlwp == l) {
				td->runnable = 1;
				break;
			}
		}
	}
	rumpuser_cv_broadcast(thrcv);
	rumpuser_mutex_exit(thrmtx);
}
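
/*
 * Illustrative sketch (not part of the original file): the two ways
 * rump_thread_allow() is used.  Passing NULL opens the global gate that
 * threadbouncer() waits on, once all CPUs have attached during bootstrap;
 * passing a specific lwp marks one lwp_create()'d thread runnable so
 * lwpbouncer() can proceed.  The call sites below are hypothetical.
 */
#if 0
	/* in rump kernel bootstrap, after CPU attachment: */
	rump_thread_allow(NULL);

	/* after lwp_create(..., &l, ...), let the new thread run: */
	rump_thread_allow(l);
#endif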

static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore,
				    nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not "
				    "enabled, %s not functional\n",
				    nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	/*
	 * Allocate with the intr-safe allocator, given that we may be
	 * creating interrupt threads.
	 */
	td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->newlwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, td, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
	if (rv)
		return rv; /* XXX */

	if (newlp) {
		*newlp = l;
	} else {
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	membar_consumer();

	return rv;
}
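
/*
 * Illustrative sketch (not part of the original file): creating a
 * joinable MPSAFE kernel thread and reaping it with kthread_join().
 * The worker function and the "example" name are made up for this
 * example; the API usage follows the signatures defined above.
 */
#if 0
static void
example_worker(void *arg)
{

	/* ... do the work ... */
	kthread_exit(0);	/* a worker must exit via kthread_exit() */
}

static void
example(void)
{
	struct lwp *l;

	if (kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN,
	    NULL, example_worker, NULL, &l, "example") != 0)
		return;
	kthread_join(l);	/* required for KTHREAD_MUSTJOIN threads */
}
#endif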

/*
 * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
 *
 * Sounds strange and out of place?  Yup yup yup.  The original motivation
 * for this was aio; it is a very infrequent code path in rump kernels.
 * XXX: threads created with lwp_create() are eternal for local clients.
 * However, they are correctly reaped for remote clients with process exit.
 */
static void *
lwpbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;
	int run;

	f = td->f;
	thrarg = td->arg;

	/* do not run until we've been enqueued */
	rumpuser_mutex_enter_nowrap(thrmtx);
	while ((run = td->runnable) == 0) {
		rumpuser_cv_wait_nowrap(thrcv, thrmtx);
	}
	rumpuser_mutex_exit(thrmtx);

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();
	kmem_free(td, sizeof(*td));

	/* should we just die instead? */
	if (run == -1) {
		rump_lwproc_releaselwp();
		lwp_userret(l);
		panic("lwpbouncer reached unreachable");
	}

	/* run, and don't come back! */
	f(thrarg);
	panic("lwp return from worker not supported");
}

int
lwp_create(struct lwp *l1, struct proc *p2, vaddr_t uaddr, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    struct lwp **newlwpp, int sclass)
{
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	if (flags)
		panic("lwp_create: flags not supported by this implementation");
	td = kmem_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->runnable = 0;
	td->newlwp = l = rump__lwproc_alloclwp(p2);

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_INSERT_TAIL(&newthr, td, entries);
	rumpuser_mutex_exit(thrmtx);

	rv = rumpuser_thread_create(lwpbouncer, td, p2->p_comm, 0,
	    PRI_USER, -1, NULL);
	if (rv)
		panic("rumpuser_thread_create failed"); /* XXX */

	*newlwpp = l;
	return 0;
}
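
/*
 * Illustrative sketch (not part of the original file): the lifecycle of
 * an lwp created here.  The new host thread parks in lwpbouncer() until
 * rump_thread_allow() marks it runnable (1); lwp_exit() instead marks it
 * -1, making the thread reap itself without running its function.  The
 * names example_func, example_arg and p below are hypothetical.
 */
#if 0
	struct lwp *l;

	lwp_create(curlwp, p, 0, 0, NULL, 0, example_func, example_arg,
	    &l, SCHED_OTHER);
	rump_thread_allow(l);	/* let the new lwp run */
	/* ... or, to have it die without ever running example_func: */
	lwp_exit(l);
#endif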

void
lwp_exit(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_FOREACH(td, &newthr, entries) {
		if (td->newlwp == l) {
			td->runnable = -1;
			break;
		}
	}
	rumpuser_mutex_exit(thrmtx);

	if (td == NULL)
		panic("lwp_exit: could not find %p", l);
}

void
lwp_userret(struct lwp *l)
{

	if ((l->l_flag & LW_RUMP_QEXIT) == 0)
		return;

	/* ok, so we should die */
	rump_unschedule();
	rumpuser_thread_exit();
}