/*	$NetBSD: threads.c,v 1.17 2012/11/04 14:40:47 pooka Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee. All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.17 2012/11/04 14:40:47 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

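/*
 * Thread description passed from kthread_create() to threadbouncer()
 * on the newly created host thread: the function to run, its argument,
 * and the kernel lwp the thread will execute as.
 */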
struct kthdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *mylwp;
};

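/*
 * Trampoline that runs first on the new host thread: attach the thread
 * to its rump kernel lwp, schedule onto a rump CPU, take the big kernel
 * lock unless the thread is MPSAFE, and call the actual thread function.
 * The thread function is expected to terminate via kthread_exit() and
 * never return here.
 */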
static void *
threadbouncer(void *arg)
{
	struct kthdesc *k = arg;
	struct lwp *l = k->mylwp;
	void (*f)(void *);
	void *thrarg;

	f = k->f;
	thrarg = k->arg;

	/* schedule ourselves */
	rumpuser_set_curlwp(l);
	rump_schedule();

	/* free dance struct */
	free(k, M_TEMP);

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

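/*
 * Threads whose creation may be silently skipped when the rump kernel
 * is configured without threads (rump_threads == 0): the caller gets a
 * success return, but the thread is never started.  t_ncmp selects
 * prefix matching instead of an exact name comparison.
 */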
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};

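/*
 * kthread_create() in the rump kernel: allocate a kernel lwp, set it up
 * according to the given flags, and hand it to the rumpuser layer, which
 * creates a host thread that starts in threadbouncer().
 */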
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct kthdesc *k;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore, nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not enabled, "
				    "%s not functional\n", nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	k = malloc(sizeof(*k), M_TEMP, M_WAITOK);
	k->f = func;
	k->arg = arg;
	k->mylwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, k, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN, &l->l_ctxlink);
	if (rv)
		return rv;

	if (newlp) {
		*newlp = l;
	} else {
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}

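/*
 * Exit the calling rump kernel thread: drop the kernel lock if held,
 * release the lwp, leave the rump CPU and terminate the backing host
 * thread.  The exit code argument is not used.
 */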
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

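/*
 * Wait for a thread created with KTHREAD_MUSTJOIN to exit by joining
 * the backing host thread.
 */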
int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	membar_consumer();

	return rv;
}