/*	$NetBSD: kern_kthread.c,v 1.51 2026/01/04 01:35:16 riastradh Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2007, 2009, 2019, 2023
 *     The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kthread.c,v 1.51 2026/01/04 01:35:16 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

static kmutex_t		kthread_lock;
static kcondvar_t	kthread_cv;

void
kthread_sysinit(void)
{

	mutex_init(&kthread_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&kthread_cv, "kthrwait");
}

/*
 * kthread_create: create a kernel thread, that is, a system-only LWP.
 */
int
kthread_create(pri_t pri, int flag, struct cpu_info *ci,
    void (*func)(void *), void *arg, lwp_t **lp, const char *fmt, ...)
{
	lwp_t *l;
	vaddr_t uaddr;
	int error, lc;
	va_list ap;

	KASSERT((flag & KTHREAD_INTR) == 0 || (flag & KTHREAD_MPSAFE) != 0);

	uaddr = uvm_uarea_system_alloc(
	   (flag & (KTHREAD_INTR|KTHREAD_IDLE)) == KTHREAD_IDLE ? ci : NULL);
	if (uaddr == 0) {
		return SET_ERROR(ENOMEM);
	}
	kmsan_orig((void *)uaddr, USPACE, KMSAN_TYPE_POOL, __RET_ADDR);
	if ((flag & KTHREAD_TS) != 0) {
		lc = SCHED_OTHER;
	} else {
		lc = SCHED_RR;
	}

	error = lwp_create(&lwp0, &proc0, uaddr, LWP_DETACHED, NULL,
	    0, func, arg, &l, lc, &lwp0.l_sigmask, &lwp0.l_sigstk);
	if (error) {
		uvm_uarea_system_free(uaddr);
		return error;
	}
	if (fmt != NULL) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		va_start(ap, fmt);
		vsnprintf(l->l_name, MAXCOMLEN, fmt, ap);
		va_end(ap);
	}

	/*
	 * Set scheduling parameters: priority and CPU binding.
	 */
	if (pri == PRI_NONE) {
		if ((flag & KTHREAD_TS) != 0) {
			/* Maximum user priority level. */
			pri = MAXPRI_USER;
		} else {
			/* Minimum kernel priority level. */
			pri = PRI_KTHREAD;
		}
	}
	mutex_enter(proc0.p_lock);
	lwp_lock(l);
	lwp_changepri(l, pri);
	if (ci != NULL) {
		if (ci != l->l_cpu) {
			lwp_unlock_to(l, ci->ci_schedstate.spc_lwplock);
			lwp_lock(l);
			l->l_cpu = ci;
		}
		l->l_pflag |= LP_BOUND;
	}

	if ((flag & KTHREAD_MUSTJOIN) != 0) {
		KASSERT(lp != NULL);
		l->l_pflag |= LP_MUSTJOIN;
	}
	if ((flag & KTHREAD_INTR) != 0) {
		l->l_pflag |= LP_INTR;
	}
	if ((flag & KTHREAD_MPSAFE) == 0) {
		l->l_pflag &= ~LP_MPSAFE;
	}

	/*
	 * Set the new LWP running, unless the caller has requested
	 * otherwise.
	 */
	KASSERT(l->l_stat == LSIDL);
	if ((flag & KTHREAD_IDLE) == 0) {
		setrunnable(l);
		/* LWP now unlocked */
	} else {
		lwp_unlock(l);
	}
	mutex_exit(proc0.p_lock);

	/* All done! */
	if (lp != NULL) {
		*lp = l;
	}
	return 0;
}
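
/*
 * Example usage of kthread_create(): an illustrative sketch, not
 * compiled here.  The example_* names and the softc are hypothetical
 * and do not appear elsewhere in the kernel.
 */
#if 0
static void
example_worker(void *arg)
{
	struct example_softc *sc = arg;	/* hypothetical softc */

	/* ... do work on sc ... */

	kthread_exit(0);
}

static int
example_start(struct example_softc *sc)
{

	/*
	 * Create a detached, MP-safe worker at the default kernel
	 * thread priority, not bound to any CPU.  No lwp_t pointer is
	 * kept because the thread is never joined.
	 */
	return kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    example_worker, sc, NULL, "examplewrk");
}
#endif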

/*
 * Cause a kernel thread to exit.  Assumes the exiting thread is the
 * current context.
 */
void
kthread_exit(int ecode)
{
	const char *name;
	lwp_t *l = curlwp;

	/* If the kernel lock is held, we need to drop it now. */
	if ((l->l_pflag & LP_MPSAFE) == 0) {
		KERNEL_UNLOCK_LAST(l);
	}

	/* We can't do much with the exit code, so just report it. */
	if (ecode != 0) {
		if ((name = l->l_name) == NULL)
			name = "unnamed";
		printf("WARNING: kthread `%s' (%d) exits with status %d\n",
		    name, l->l_lid, ecode);
	}

	/* Barrier for joining. */
	if (l->l_pflag & LP_MUSTJOIN) {
		bool *exitedp;

		mutex_enter(&kthread_lock);
		while ((exitedp = l->l_private) == NULL) {
			cv_wait(&kthread_cv, &kthread_lock);
		}
		KASSERT(!*exitedp);
		*exitedp = true;
		cv_broadcast(&kthread_cv);
		mutex_exit(&kthread_lock);
	}

	/* And exit. */
	lwp_exit(l);
	panic("kthread_exit");
}
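
/*
 * Example: a worker that reports failure through its exit status.  An
 * illustrative sketch, not compiled here; example_do_work() is a
 * hypothetical helper.  A nonzero ecode cannot be returned to anyone,
 * so it is merely logged by the printf above.
 */
#if 0
static void
example_failing_worker(void *arg)
{
	int error;

	error = example_do_work(arg);	/* hypothetical helper */
	kthread_exit(error);		/* logs a warning if error != 0 */
}
#endif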

/*
 * Wait for a kthread to exit, like pthread_join().
 */
int
kthread_join(lwp_t *l)
{
	bool exited = false;

	KASSERT((l->l_flag & LW_SYSTEM) != 0);
	KASSERT((l->l_pflag & LP_MUSTJOIN) != 0);

	/*
	 * - Ask the kthread to write to `exited'.
	 * - After this, touching l is forbidden -- it may be freed.
	 * - Wait until the kthread has written to `exited'.
	 */
	mutex_enter(&kthread_lock);
	KASSERT(l->l_private == NULL);
	l->l_private = &exited;
	cv_broadcast(&kthread_cv);
	while (!exited) {
		cv_wait(&kthread_cv, &kthread_lock);
	}
	mutex_exit(&kthread_lock);

	return 0;
}
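
/*
 * Example: the create-and-join pattern.  An illustrative sketch, not
 * compiled here; the example_* names and softc fields are
 * hypothetical, and synchronization of sc_dying is elided for
 * brevity.  The creator must pass KTHREAD_MUSTJOIN and keep the lwp_t
 * pointer, and the worker must eventually call kthread_exit(), or
 * kthread_join() will wait forever.
 */
#if 0
static void
example_worker(void *arg)
{
	struct example_softc *sc = arg;	/* hypothetical softc */

	while (!sc->sc_dying) {
		/* ... do work ... */
	}
	kthread_exit(0);
}

static int
example_attach(struct example_softc *sc)
{

	return kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
	    example_worker, sc, &sc->sc_lwp, "examplewrk");
}

static void
example_detach(struct example_softc *sc)
{

	sc->sc_dying = true;
	kthread_join(sc->sc_lwp);	/* wait for, and reclaim, the worker */
	sc->sc_lwp = NULL;
}
#endif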

/*
 * kthread_fpu_enter()
 *
 *	Allow the current lwp, which must be a kthread, to use the FPU.
 *	Return a cookie that must be passed to kthread_fpu_exit when
 *	done.  Must be used only in thread context.  Recursive -- you
 *	can call kthread_fpu_enter several times in a row as long as
 *	you pass the cookies in reverse order to kthread_fpu_exit.
 */
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	/*
	 * Remember whether this thread already had FPU access, and
	 * mark this thread as having FPU access.
	 */
	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to enable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_enter_md();

	return s;
}

/*
 * kthread_fpu_exit(s)
 *
 *	Restore the current lwp's FPU access to what it was before the
 *	matching call to kthread_fpu_enter() that returned s.  Must be
 *	used only in thread context.
 */
void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(!cpu_intr_p(),
	    "%s is not allowed in interrupt context", __func__);
	KASSERTMSG(!cpu_softintr_p(),
	    "%s is not allowed in interrupt context", __func__);

	lwp_lock(l);
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
	lwp_unlock(l);

	/* Take MD steps to zero and disable the FPU if necessary.  */
	if (s == 0)
		kthread_fpu_exit_md();
}
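
/*
 * Example usage of the kthread_fpu_enter()/kthread_fpu_exit() bracket.
 * An illustrative sketch, not compiled here; example_fpu_worker is
 * hypothetical.  Cookies from nested brackets must be passed back in
 * reverse order, innermost first.
 */
#if 0
static void
example_fpu_worker(void *arg)
{
	int s, s2;

	s = kthread_fpu_enter();
	/* ... FPU/SIMD use is safe here ... */

	s2 = kthread_fpu_enter();	/* nesting is allowed */
	/* ... more FPU work ... */
	kthread_fpu_exit(s2);		/* FPU access kept: s2 != 0 */

	kthread_fpu_exit(s);		/* FPU state zeroed and disabled */
	kthread_exit(0);
}
#endif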
    294