/*	$NetBSD: threads.c,v 1.15.12.3 2014/08/20 00:04:41 tls Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.15.12.3 2014/08/20 00:04:41 tls Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/queue.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct thrdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *newlwp;
	int runnable;

	TAILQ_ENTRY(thrdesc) entries;
};

static bool threads_are_go;
static struct rumpuser_mtx *thrmtx;
static struct rumpuser_cv *thrcv;
static TAILQ_HEAD(, thrdesc) newthr;
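
/*
 * Added commentary: thrmtx/thrcv protect both the threads_are_go gate
 * and the newthr queue.  Bounced threads sleep on thrcv until
 * rump_thread_allow() marks them runnable (or, for kernel threads,
 * opens the global gate).
 */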

static void *
threadbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;

	f = td->f;
	thrarg = td->arg;

	/* don't allow threads to run before all CPUs have fully attached */
	if (!threads_are_go) {
		rumpuser_mutex_enter_nowrap(thrmtx);
		while (!threads_are_go) {
			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
		}
		rumpuser_mutex_exit(thrmtx);
	}

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();

	/* free dance struct */
	kmem_intr_free(td, sizeof(*td));

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

void
rump_thread_init(void)
{

	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&thrcv);
	TAILQ_INIT(&newthr);
}

void
rump_thread_allow(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter(thrmtx);
	if (l == NULL) {
		threads_are_go = true;
	} else {
		TAILQ_FOREACH(td, &newthr, entries) {
			if (td->newlwp == l) {
				td->runnable = 1;
				break;
			}
		}
	}
	rumpuser_cv_broadcast(thrcv);
	rumpuser_mutex_exit(thrmtx);
}

static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};
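
/*
 * Added commentary: when the rump kernel runs without threads
 * (rump_threads == 0), creation requests for the threads listed above
 * are skipped with a diagnostic instead of panicking.  t_ncmp selects
 * prefix matching (strncmp) rather than an exact name comparison.
 */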

int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore, nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not enabled, "
				    "%s not functional\n", nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	/*
	 * Allocate with the intr-safe allocator, given that we may be
	 * creating interrupt threads.
	 */
	td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->newlwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, td, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
	if (rv)
		return rv; /* XXX */

	if (newlp) {
		*newlp = l;
	} else {
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}
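
/*
 * Illustrative usage sketch (added; "mythread" and "myarg" are example
 * names, not part of this file): a component wanting a joinable MPSAFE
 * worker would do roughly
 *
 *	struct lwp *l;
 *	int error;
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN,
 *	    NULL, mythread, myarg, &l, "mythread");
 *	...
 *	error = kthread_join(l);
 *
 * with mythread() eventually calling kthread_exit().
 */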

void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	membar_consumer();

	return rv;
}
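
/*
 * Added commentary: the membar_consumer() above presumably pairs with
 * the memory barrier implied by rump_unschedule() in the exiting thread
 * (see the "unschedule includes membar" note in kthread_exit()), so the
 * joined thread's writes are visible to the joiner.
 */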

/*
 * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
 *
 * Sounds strange and out-of-place?  Yup, it is.  The original motivation
 * for this was aio, and this is a very infrequent code path in rump kernels.
 * XXX: threads created with lwp_create() are eternal for local clients.
 * However, they are correctly reaped for remote clients on process exit.
 */
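
/*
 * Added commentary: the handshake with lwpbouncer() goes through
 * td->runnable: 0 means "keep waiting on thrcv", 1 (set by
 * rump_thread_allow()) means "run the worker", and -1 (set by
 * lwp_exit()) means "release the lwp and tear down without running it".
 */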
static void *
lwpbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;
	int run;

	f = td->f;
	thrarg = td->arg;

	/* do not run until we've been explicitly allowed to (or told to die) */
	rumpuser_mutex_enter_nowrap(thrmtx);
	while ((run = td->runnable) == 0) {
		rumpuser_cv_wait_nowrap(thrcv, thrmtx);
	}
	rumpuser_mutex_exit(thrmtx);

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();
	kmem_free(td, sizeof(*td));

	/* should we just die instead? */
	if (run == -1) {
		rump_lwproc_releaselwp();
		lwp_userret(l);
		panic("lwpbouncer reached unreachable");
	}

	/* run, and don't come back! */
	f(thrarg);
	panic("lwp return from worker not supported");
}

int
lwp_create(struct lwp *l1, struct proc *p2, vaddr_t uaddr, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    struct lwp **newlwpp, int sclass)
{
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	if (flags)
		panic("lwp_create: flags not supported by this implementation");
	td = kmem_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->runnable = 0;
	td->newlwp = l = rump__lwproc_alloclwp(p2);

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_INSERT_TAIL(&newthr, td, entries);
	rumpuser_mutex_exit(thrmtx);

	rv = rumpuser_thread_create(lwpbouncer, td, p2->p_comm, 0,
	    PRI_USER, -1, NULL);
	if (rv)
		panic("rumpuser_thread_create failed"); /* XXX */

	*newlwpp = l;
	return 0;
}

void
lwp_exit(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_FOREACH(td, &newthr, entries) {
		if (td->newlwp == l) {
			td->runnable = -1;
			break;
		}
	}
	rumpuser_mutex_exit(thrmtx);

	if (td == NULL)
		panic("lwp_exit: could not find %p\n", l);
}

void
lwp_userret(struct lwp *l)
{

	if ((l->l_flag & LW_RUMP_QEXIT) == 0)
		return;

	/* ok, so we should die */
	rump_unschedule();
	rumpuser_thread_exit();
}