kern_kthread.c revision 1.47 1 1.47 riastrad /* $NetBSD: kern_kthread.c,v 1.47 2022/09/13 09:37:49 riastradh Exp $ */
2 1.1 thorpej
3 1.1 thorpej /*-
4 1.44 ad * Copyright (c) 1998, 1999, 2007, 2009, 2019 The NetBSD Foundation, Inc.
5 1.1 thorpej * All rights reserved.
6 1.1 thorpej *
7 1.1 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.1 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.17 ad * NASA Ames Research Center, and by Andrew Doran.
10 1.1 thorpej *
11 1.1 thorpej * Redistribution and use in source and binary forms, with or without
12 1.1 thorpej * modification, are permitted provided that the following conditions
13 1.1 thorpej * are met:
14 1.1 thorpej * 1. Redistributions of source code must retain the above copyright
15 1.1 thorpej * notice, this list of conditions and the following disclaimer.
16 1.1 thorpej * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 thorpej * notice, this list of conditions and the following disclaimer in the
18 1.1 thorpej * documentation and/or other materials provided with the distribution.
19 1.1 thorpej *
20 1.1 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 thorpej * POSSIBILITY OF SUCH DAMAGE.
31 1.1 thorpej */
32 1.12 lukem
33 1.12 lukem #include <sys/cdefs.h>
34 1.47 riastrad __KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.47 2022/09/13 09:37:49 riastradh Exp $");
35 1.1 thorpej
36 1.1 thorpej #include <sys/param.h>
37 1.46 riastrad #include <sys/cpu.h>
38 1.1 thorpej #include <sys/systm.h>
39 1.1 thorpej #include <sys/kernel.h>
40 1.1 thorpej #include <sys/kthread.h>
41 1.33 rmind #include <sys/mutex.h>
42 1.17 ad #include <sys/sched.h>
43 1.17 ad #include <sys/kmem.h>
44 1.47 riastrad #include <sys/msan.h>
45 1.17 ad
46 1.17 ad #include <uvm/uvm_extern.h>
47 1.33 rmind
/*
 * State for the kthread_join()/kthread_exit() handshake.  kthread_jtarget
 * names the single LWP currently being joined; joiners and exiting threads
 * rendezvous on kthread_cv under kthread_lock.  Only one join may be in
 * flight at a time (others wait for kthread_jtarget to become NULL).
 */
static lwp_t *		kthread_jtarget;
static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;
51 1.33 rmind
52 1.33 rmind void
53 1.33 rmind kthread_sysinit(void)
54 1.33 rmind {
55 1.1 thorpej
56 1.33 rmind mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
57 1.33 rmind cv_init(&kthread_cv, "kthrwait");
58 1.33 rmind kthread_jtarget = NULL;
59 1.33 rmind }
60 1.1 thorpej
/*
 * kthread_create: create a kernel thread, that is, a system-only LWP
 * attached to proc0.
 *
 * => pri:  scheduling priority, or PRI_NONE to pick a default based on
 *    the KTHREAD_TS flag.
 * => flag: KTHREAD_* flags (TS/RR class, IDLE, INTR, MPSAFE, MUSTJOIN).
 * => ci:   CPU to bind the thread to, or NULL for no binding.
 * => func/arg: thread entry point and its argument.
 * => lp:   if non-NULL, receives the new LWP; required for MUSTJOIN.
 * => fmt:  printf-style thread name, or NULL for no name.
 *
 * Returns 0 on success, or an errno (ENOMEM, or lwp_create() failure).
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	/* Soft-interrupt threads must be MP-safe. */
	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	/*
	 * Allocate the kernel stack/uarea; for idle LWPs (and only those)
	 * allocate it close to the target CPU.
	 */
	uaddr = uvm_uarea_system_alloc(
	    (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return ENOMEM;
	}
	/* Tell KMSAN the uarea origin for diagnostics; no-op without KMSAN. */
	kmsan_orig((void *)uaddr, USPACE, KMSAN_TYPE_POOL, __RET_ADDR);
	/* Scheduling class: timeshared or (default) real-time round-robin. */
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	/* Kernel threads are always detached LWPs of proc0. */
	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set parameters.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			/*
			 * Migrate to the target CPU: hand the LWP lock over
			 * to that CPU's scheduler lock, then re-acquire.
			 */
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
		}
		l->l_pflag |= LP_BOUND;
		l->l_cpu = ci;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		/* Caller must be able to receive the LWP to join it later. */
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		/* lwp_create() sets LP_MPSAFE by default; clear on request. */
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}
154 1.1 thorpej
/*
 * Cause a kernel thread to exit.  Assumes the exiting thread is the
 * current context.  Never returns.
 *
 * => ecode: exit status; there is nowhere to deliver it, so a non-zero
 *    value is only reported on the console.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/*
	 * Barrier for joining: wait until a kthread_join() caller names
	 * this LWP as the join target, then clear the target and wake the
	 * joiner so it knows we are on our way out.  Must happen before
	 * lwp_exit() tears the LWP down.
	 */
	if (l->l_pflag & LP_MUSTJOIN) {
		mutex_enter(&kthread_lock);
		while (kthread_jtarget != l) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		kthread_jtarget = NULL;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* And exit.. */
	lwp_exit(l);
	/* lwp_exit() does not return; make that obvious if it ever does. */
	panic("kthread_exit");
}
193 1.2 thorpej
/*
 * Wait for a kthread to exit, as pthread_join().
 *
 * => l must be a system LWP created with KTHREAD_MUSTJOIN.
 * => Always returns 0.
 */
int
kthread_join(lwp_t *l)
{

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Wait if some other thread has occupied the target.
	 * - Specify our kthread as a target and notify it.
	 * - Wait for the target kthread to notify us.
	 */
	mutex_enter(&kthread_lock);
	while (kthread_jtarget) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	kthread_jtarget = l;
	cv_broadcast(&kthread_cv);
	/* kthread_exit() clears kthread_jtarget when l is going away. */
	while (kthread_jtarget == l) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}
222 1.46 riastrad
223 1.46 riastrad /*
224 1.46 riastrad * kthread_fpu_enter()
225 1.46 riastrad *
226 1.46 riastrad * Allow the current lwp, which must be a kthread, to use the FPU.
227 1.46 riastrad * Return a cookie that must be passed to kthread_fpu_exit when
228 1.46 riastrad * done. Must be used only in thread context. Recursive -- you
229 1.46 riastrad * can call kthread_fpu_enter several times in a row as long as
230 1.46 riastrad * you pass the cookies in reverse order to kthread_fpu_exit.
231 1.46 riastrad */
232 1.46 riastrad int
233 1.46 riastrad kthread_fpu_enter(void)
234 1.46 riastrad {
235 1.46 riastrad struct lwp *l = curlwp;
236 1.46 riastrad int s;
237 1.46 riastrad
238 1.46 riastrad KASSERTMSG(!cpu_intr_p(),
239 1.46 riastrad "%s is not allowed in interrupt context", __func__);
240 1.46 riastrad KASSERTMSG(!cpu_softintr_p(),
241 1.46 riastrad "%s is not allowed in interrupt context", __func__);
242 1.46 riastrad
243 1.46 riastrad /*
244 1.46 riastrad * Remember whether this thread already had FPU access, and
245 1.46 riastrad * mark this thread as having FPU access.
246 1.46 riastrad */
247 1.46 riastrad lwp_lock(l);
248 1.46 riastrad KASSERTMSG(l->l_flag & LW_SYSTEM,
249 1.46 riastrad "%s is allowed only in kthreads", __func__);
250 1.46 riastrad s = l->l_flag & LW_SYSTEM_FPU;
251 1.46 riastrad l->l_flag |= LW_SYSTEM_FPU;
252 1.46 riastrad lwp_unlock(l);
253 1.46 riastrad
254 1.46 riastrad /* Take MD steps to enable the FPU if necessary. */
255 1.46 riastrad if (s == 0)
256 1.46 riastrad kthread_fpu_enter_md();
257 1.46 riastrad
258 1.46 riastrad return s;
259 1.46 riastrad }
260 1.46 riastrad
261 1.46 riastrad /*
262 1.46 riastrad * kthread_fpu_exit(s)
263 1.46 riastrad *
264 1.46 riastrad * Restore the current lwp's FPU access to what it was before the
265 1.46 riastrad * matching call to kthread_fpu_enter() that returned s. Must be
266 1.46 riastrad * used only in thread context.
267 1.46 riastrad */
268 1.46 riastrad void
269 1.46 riastrad kthread_fpu_exit(int s)
270 1.46 riastrad {
271 1.46 riastrad struct lwp *l = curlwp;
272 1.46 riastrad
273 1.46 riastrad KASSERT(s == (s & LW_SYSTEM_FPU));
274 1.46 riastrad KASSERTMSG(!cpu_intr_p(),
275 1.46 riastrad "%s is not allowed in interrupt context", __func__);
276 1.46 riastrad KASSERTMSG(!cpu_softintr_p(),
277 1.46 riastrad "%s is not allowed in interrupt context", __func__);
278 1.46 riastrad
279 1.46 riastrad lwp_lock(l);
280 1.46 riastrad KASSERTMSG(l->l_flag & LW_SYSTEM,
281 1.46 riastrad "%s is allowed only in kthreads", __func__);
282 1.46 riastrad KASSERT(l->l_flag & LW_SYSTEM_FPU);
283 1.46 riastrad l->l_flag ^= s ^ LW_SYSTEM_FPU;
284 1.46 riastrad lwp_unlock(l);
285 1.46 riastrad
286 1.46 riastrad /* Take MD steps to zero and disable the FPU if necessary. */
287 1.46 riastrad if (s == 0)
288 1.46 riastrad kthread_fpu_exit_md();
289 1.46 riastrad }
290