/*	$NetBSD: kern_kthread.c,v 1.49 2023/09/23 14:40:42 ad Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007, 2009, 2019, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.49 2023/09/23 14:40:42 ad Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm_extern.h>

static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;

void
kthread_sysinit(void)
{

	mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&kthread_cv, "kthrwait");
}

/*
 * kthread_create: create a kernel thread, that is, a system-only LWP.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	uaddr = uvm_uarea_system_alloc(
	    (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return ENOMEM;
	}
	kmsan_orig((void *)uaddr, USPACE, KMSAN_TYPE_POOL, __RET_ADDR);
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
			l->l_cpu = ci;
		}
		l->l_pflag |= LP_BOUND;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}
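
/*
 * Example usage (an illustrative sketch, not code used by this file):
 * create an MP-safe worker thread at the default priority and let it
 * run immediately.  The names "example_worker" and "example_lp" are
 * hypothetical and exist only for this example.
 *
 *	static lwp_t *example_lp;
 *
 *	static void
 *	example_worker(void *arg)
 *	{
 *
 *		for (;;) {
 *			kpause("exwork", false, hz, NULL);
 *		}
 *	}
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
 *	    example_worker, NULL, &example_lp, "exworker");
 *	if (error != 0)
 *		return error;	(no LWP was created; handle the failure)
 */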

/*
 * Cause a kernel thread to exit.  Assumes the exiting thread is the
 * current context.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* Barrier for joining. */
	if (l->l_pflag & LP_MUSTJOIN) {
		bool *exitedp;

		mutex_enter(&kthread_lock);
		while ((exitedp = l->l_private) == NULL) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		KASSERT(!*exitedp);
		*exitedp = true;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* And exit. */
	lwp_exit(l);
	panic("kthread_exit");
}

/*
 * Wait for a kthread to exit, like pthread_join().
 */
int
kthread_join(lwp_t *l)
{
	bool exited = false;

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Ask the kthread to write to `exited'.
	 * - After this, touching l is forbidden -- it may be freed.
	 * - Wait until the kthread has written to `exited'.
	 */
	mutex_enter(&kthread_lock);
	KASSERT(l->l_private == NULL);
	l->l_private = &exited;
	cv_broadcast(&kthread_cv);
	while (!exited) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}
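
/*
 * Example usage (an illustrative sketch, not code used by this file):
 * a worker created with KTHREAD_MUSTJOIN must finish by calling
 * kthread_exit(), and its creator reclaims it with kthread_join().
 * The "sc" softc, its sc_lock/sc_cv/sc_stopping members and the
 * "example_lp"/"example_worker" names are hypothetical.
 *
 *	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN,
 *	    NULL, example_worker, sc, &example_lp, "exworker");
 *	...
 *	mutex_enter(&sc->sc_lock);	(ask the worker to stop)
 *	sc->sc_stopping = true;
 *	cv_broadcast(&sc->sc_cv);
 *	mutex_exit(&sc->sc_lock);
 *	kthread_join(example_lp);	(worker ends with kthread_exit(0))
 */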

/*
 * kthread_fpu_enter()
 *
 *	Allow the current lwp, which must be a kthread, to use the FPU.
 *	Return a cookie that must be passed to kthread_fpu_exit when
 *	done.  Must be used only in thread context.  Recursive -- you
 *	can call kthread_fpu_enter several times in a row as long as
 *	you pass the cookies in reverse order to kthread_fpu_exit.
 */
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	/*
	 * Remember whether this thread already had FPU access, and
	 * mark this thread as having FPU access.
	 */
	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to enable the FPU if necessary. */
	if (s == 0)
		kthread_fpu_enter_md();

	return s;
}

/*
 * kthread_fpu_exit(s)
 *
 *	Restore the current lwp's FPU access to what it was before the
 *	matching call to kthread_fpu_enter() that returned s.  Must be
 *	used only in thread context.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to zero and disable the FPU if necessary. */
	if (s == 0)
		kthread_fpu_exit_md();
}
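
/*
 * Example usage (an illustrative sketch, not code used by this file):
 * the enter/exit pair nests as long as the cookies are passed back in
 * reverse order, e.g. around some hypothetical SIMD/FPU-using helper:
 *
 *	int s;
 *
 *	s = kthread_fpu_enter();
 *	(use FPU or SIMD registers here)
 *	kthread_fpu_exit(s);
 */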