/*	$NetBSD: threads.c,v 1.28 2023/10/04 21:56:15 ad Exp $	*/

/*
 * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by
 * The Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.28 2023/10/04 21:56:15 ad Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/queue.h>

#include <rump-sys/kern.h>

#include <rump/rumpuser.h>

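/*
 * Descriptor handed to the bouncer trampolines below: the payload
 * function and its argument, the pre-allocated lwp that will run it,
 * and, for lwp_create()'d threads, a flag telling whether the thread
 * may run yet.  Pending lwp_create()'d threads sit on the newthr queue
 * until rump_thread_allow() or lwp_exit() decides their fate.
 */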
struct thrdesc {
	void (*f)(void *);
	void *arg;
	struct lwp *newlwp;
	int runnable;

	TAILQ_ENTRY(thrdesc) entries;
};

static bool threads_are_go;
static struct rumpuser_mtx *thrmtx;
static struct rumpuser_cv *thrcv;
static TAILQ_HEAD(, thrdesc) newthr;

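/*
 * Trampoline for kthread_create(): runs on the new host thread, waits
 * until all virtual CPUs have attached, binds the lwp to this thread,
 * takes the big kernel lock unless the thread is MPSAFE, and then calls
 * the payload, which is expected to finish with kthread_exit().
 */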
static void *
threadbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;

	f = td->f;
	thrarg = td->arg;

	/* don't allow threads to run before all CPUs have fully attached */
	if (!threads_are_go) {
		rumpuser_mutex_enter_nowrap(thrmtx);
		while (!threads_are_go) {
			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
		}
		rumpuser_mutex_exit(thrmtx);
	}

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();

	/* free dance struct */
	kmem_intr_free(td, sizeof(*td));

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_LOCK(1, NULL);

	f(thrarg);

	panic("unreachable, should kthread_exit()");
}

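/* Initialize the mutex, condvar and queue used to gate new threads. */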
void
rump_thread_init(void)
{

	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&thrcv);
	TAILQ_INIT(&newthr);
}

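/*
 * Let threads run: with l == NULL, open the floodgates for every
 * kthread blocked in threadbouncer(); with a specific lwp, mark the
 * matching lwp_create()'d thread runnable so lwpbouncer() can proceed.
 */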
void
rump_thread_allow(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter(thrmtx);
	if (l == NULL) {
		threads_are_go = true;
	} else {
		TAILQ_FOREACH(td, &newthr, entries) {
			if (td->newlwp == l) {
				td->runnable = 1;
				break;
			}
		}
	}
	rumpuser_cv_broadcast(thrcv);
	rumpuser_mutex_exit(thrmtx);
}

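/*
 * Threads whose creation may be silently skipped (with a warning) when
 * the rump kernel is configured without thread support.  t_ncmp selects
 * a prefix match on the thread name instead of an exact match.
 */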
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};

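/*
 * Create a kernel thread.  The thread is backed by a host thread running
 * threadbouncer(); it starts executing func(arg) once all virtual CPUs
 * have attached (see rump_thread_allow()).
 */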
int
kthread_create(pri_t pri, int flags, struct cpu_info *ci,
	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
{
	char thrstore[MAXCOMLEN];
	const char *thrname = NULL;
	va_list ap;
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	thrstore[0] = '\0';
	if (fmt) {
		va_start(ap, fmt);
		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
		va_end(ap);
		thrname = thrstore;
	}

	/*
	 * We don't want a module unload thread.
	 * (XXX: yes, this is a kludge too, and the kernel should
	 * have a more flexible method for configuring which threads
	 * we want).
	 */
	if (strcmp(thrstore, "modunload") == 0) {
		return 0;
	}

	if (!rump_threads) {
		bool matched;
		int i;

		/* do we want to fake it? */
		for (i = 0; i < __arraycount(nothreads); i++) {
			if (nothreads[i].t_ncmp) {
				matched = strncmp(thrstore, nothreads[i].t_name,
				    strlen(nothreads[i].t_name)) == 0;
			} else {
				matched = strcmp(thrstore,
				    nothreads[i].t_name) == 0;
			}
			if (matched) {
				aprint_error("rump kernel threads not enabled, "
				    "%s not functional\n", nothreads[i].t_name);
				return 0;
			}
		}
		panic("threads not available");
	}
	KASSERT(fmt != NULL);

	/*
	 * Allocate with the intr-safe allocator, given that we may be
	 * creating interrupt threads.
	 */
	td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->newlwp = l = rump__lwproc_alloclwp(&proc0);
	l->l_flag |= LW_SYSTEM;
	if (flags & KTHREAD_MPSAFE)
		l->l_pflag |= LP_MPSAFE;
	if (flags & KTHREAD_INTR)
		l->l_pflag |= LP_INTR;
	if (ci) {
		l->l_pflag |= LP_BOUND;
		l->l_target_cpu = ci;
	}
	if (thrname) {
		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
		strlcpy(l->l_name, thrname, MAXCOMLEN);
	}

	rv = rumpuser_thread_create(threadbouncer, td, thrname,
	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
	if (rv)
		return rv; /* XXX */

	if (newlp) {
		*newlp = l;
	} else {
		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
	}

	return 0;
}

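/*
 * Terminate the calling kernel thread: drop the kernel lock if held,
 * release the lwp and exit the backing host thread.
 */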
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}

int
kthread_join(struct lwp *l)
{
	int rv;

	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	membar_consumer();

	return rv;
}

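/*
 * Mark a kthread as using the FPU.  kthread_fpu_enter() returns the
 * previous LW_SYSTEM_FPU state so that nested sections can be undone
 * correctly with kthread_fpu_exit().
 */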
int
kthread_fpu_enter(void)
{
	struct lwp *l = curlwp;
	int s;

	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	s = l->l_flag & LW_SYSTEM_FPU;
	l->l_flag |= LW_SYSTEM_FPU;

	return s;
}

void
kthread_fpu_exit(int s)
{
	struct lwp *l = curlwp;

	KASSERT(s == (s & LW_SYSTEM_FPU));
	KASSERTMSG(l->l_flag & LW_SYSTEM,
	    "%s is allowed only in kthreads", __func__);
	KASSERT(l->l_flag & LW_SYSTEM_FPU);
	l->l_flag ^= s ^ LW_SYSTEM_FPU;
}

/*
 * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
 *
 * Sounds strange and out-of-place?  Yup yup yup.  The original motivation
 * for this was aio.  This is a very infrequent code path in rump kernels.
 * XXX: threads created with lwp_create() are eternal for local clients.
 * However, they are correctly reaped for remote clients on process exit.
 */
static void *
lwpbouncer(void *arg)
{
	struct thrdesc *td = arg;
	struct lwp *l = td->newlwp;
	void (*f)(void *);
	void *thrarg;
	int run;

	f = td->f;
	thrarg = td->arg;

	/* do not run until we've been enqueued */
	rumpuser_mutex_enter_nowrap(thrmtx);
	while ((run = td->runnable) == 0) {
		rumpuser_cv_wait_nowrap(thrcv, thrmtx);
	}
	rumpuser_mutex_exit(thrmtx);

	/* schedule ourselves */
	rump_lwproc_curlwp_set(l);
	rump_schedule();
	kmem_free(td, sizeof(*td));

	/* should we just die instead? */
	if (run == -1) {
		rump_lwproc_releaselwp();
		lwp_userret(l);
		panic("lwpbouncer reached unreachable");
	}

	/* run, and don't come back! */
	f(thrarg);
	panic("lwp return from worker not supported");
}

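/*
 * Create an lwp backed by a host thread running lwpbouncer().  The new
 * lwp does not start executing func(arg) until rump_thread_allow() is
 * called on it; lwp_exit() on a still-pending lwp makes it exit instead.
 */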
int
lwp_create(struct lwp *l1, struct proc *p2, vaddr_t uaddr, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    struct lwp **newlwpp, int sclass, const sigset_t *sigmask,
    const stack_t *sigstk)
{
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	if (flags)
		panic("lwp_create: flags not supported by this implementation");
	td = kmem_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->runnable = 0;
	td->newlwp = l = rump__lwproc_alloclwp(p2);

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_INSERT_TAIL(&newthr, td, entries);
	rumpuser_mutex_exit(thrmtx);

	rv = rumpuser_thread_create(lwpbouncer, td, p2->p_comm, 0,
	    PRI_USER, -1, NULL);
	if (rv)
		panic("rumpuser_thread_create failed"); /* XXX */

	*newlwpp = l;
	return 0;
}

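/*
 * Tell a pending lwp to exit: mark its descriptor with runnable == -1
 * so that lwpbouncer() releases the lwp instead of running the payload.
 */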
void
lwp_exit(struct lwp *l)
{
	struct thrdesc *td;

	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_FOREACH(td, &newthr, entries) {
		if (td->newlwp == l) {
			td->runnable = -1;
			break;
		}
	}
	rumpuser_mutex_exit(thrmtx);

	if (td == NULL)
		panic("lwp_exit: could not find %p\n", l);
}

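/*
 * Called when an lwp is about to return to "userspace": if the lwp has
 * been flagged for exit (LW_RUMP_QEXIT), unschedule it and terminate
 * the backing host thread.
 */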
void
lwp_userret(struct lwp *l)
{

	if ((l->l_flag & LW_RUMP_QEXIT) == 0)
		return;

	/* ok, so we should die */
	rump_unschedule();
	rumpuser_thread_exit();
}

void
lwp_need_userret(struct lwp *l)
{

	/* do what? */
}