Home | History | Annotate | Line # | Download | only in rumpkern
      1  1.28        ad /*	$NetBSD: threads.c,v 1.28 2023/10/04 21:56:15 ad Exp $	*/
      2   1.1     pooka 
      3   1.1     pooka /*
      4   1.1     pooka  * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
      5   1.1     pooka  *
      6   1.1     pooka  * Development of this software was supported by
      7   1.1     pooka  * The Finnish Cultural Foundation.
      8   1.1     pooka  *
      9   1.1     pooka  * Redistribution and use in source and binary forms, with or without
     10   1.1     pooka  * modification, are permitted provided that the following conditions
     11   1.1     pooka  * are met:
     12   1.1     pooka  * 1. Redistributions of source code must retain the above copyright
     13   1.1     pooka  *    notice, this list of conditions and the following disclaimer.
     14   1.1     pooka  * 2. Redistributions in binary form must reproduce the above copyright
     15   1.1     pooka  *    notice, this list of conditions and the following disclaimer in the
     16   1.1     pooka  *    documentation and/or other materials provided with the distribution.
     17   1.1     pooka  *
     18   1.1     pooka  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
     19   1.1     pooka  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     20   1.1     pooka  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     21   1.1     pooka  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     22   1.1     pooka  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     23   1.1     pooka  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     24   1.1     pooka  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25   1.1     pooka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     26   1.1     pooka  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27   1.1     pooka  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28   1.1     pooka  * SUCH DAMAGE.
     29   1.1     pooka  */
     30   1.1     pooka 
     31   1.1     pooka #include <sys/cdefs.h>
     32  1.28        ad __KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.28 2023/10/04 21:56:15 ad Exp $");
     33   1.1     pooka 
     34   1.1     pooka #include <sys/param.h>
     35  1.10     pooka #include <sys/atomic.h>
     36   1.1     pooka #include <sys/kmem.h>
     37   1.1     pooka #include <sys/kthread.h>
     38  1.11     pooka #include <sys/malloc.h>
     39   1.1     pooka #include <sys/systm.h>
     40  1.23     pooka #include <sys/queue.h>
     41   1.1     pooka 
     42  1.24     pooka #include <rump-sys/kern.h>
     43  1.24     pooka 
     44   1.1     pooka #include <rump/rumpuser.h>
     45   1.1     pooka 
/*
 * Descriptor for a thread under construction.  Allocated by the
 * creator and handed to the bouncer routine running in the new
 * host thread, which frees it once the thread is up.
 */
struct thrdesc {
	void (*f)(void *);		/* thread entry point */
	void *arg;			/* argument passed to f */
	struct lwp *newlwp;		/* lwp backing the new thread */
	int runnable;			/* lwp_create() path: 0 = parked,
					 * 1 = go (rump_thread_allow()),
					 * -1 = die (lwp_exit()) */

	TAILQ_ENTRY(thrdesc) entries;	/* linkage on the newthr queue */
};
     54   1.1     pooka 
/* threads_are_go and newthr are protected by thrmtx; waiters sleep on thrcv */
static bool threads_are_go;		/* set once all CPUs have attached */
static struct rumpuser_mtx *thrmtx;
static struct rumpuser_cv *thrcv;
static TAILQ_HEAD(, thrdesc) newthr;	/* lwp_create()d, not yet running */
     59  1.18     pooka 
     60   1.1     pooka static void *
     61   1.1     pooka threadbouncer(void *arg)
     62   1.1     pooka {
     63  1.23     pooka 	struct thrdesc *td = arg;
     64  1.23     pooka 	struct lwp *l = td->newlwp;
     65   1.1     pooka 	void (*f)(void *);
     66   1.1     pooka 	void *thrarg;
     67   1.1     pooka 
     68  1.23     pooka 	f = td->f;
     69  1.23     pooka 	thrarg = td->arg;
     70   1.2     pooka 
     71  1.18     pooka 	/* don't allow threads to run before all CPUs have fully attached */
     72  1.18     pooka 	if (!threads_are_go) {
     73  1.18     pooka 		rumpuser_mutex_enter_nowrap(thrmtx);
     74  1.18     pooka 		while (!threads_are_go) {
     75  1.18     pooka 			rumpuser_cv_wait_nowrap(thrcv, thrmtx);
     76  1.18     pooka 		}
     77  1.18     pooka 		rumpuser_mutex_exit(thrmtx);
     78  1.18     pooka 	}
     79  1.18     pooka 
     80   1.3     pooka 	/* schedule ourselves */
     81  1.22     pooka 	rump_lwproc_curlwp_set(l);
     82   1.1     pooka 	rump_schedule();
     83   1.1     pooka 
     84  1.11     pooka 	/* free dance struct */
     85  1.23     pooka 	kmem_intr_free(td, sizeof(*td));
     86  1.11     pooka 
     87   1.1     pooka 	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
     88   1.1     pooka 		KERNEL_LOCK(1, NULL);
     89   1.1     pooka 
     90   1.1     pooka 	f(thrarg);
     91   1.1     pooka 
     92   1.1     pooka 	panic("unreachable, should kthread_exit()");
     93   1.1     pooka }
     94   1.1     pooka 
     95  1.18     pooka void
     96  1.18     pooka rump_thread_init(void)
     97  1.18     pooka {
     98  1.18     pooka 
     99  1.19     pooka 	rumpuser_mutex_init(&thrmtx, RUMPUSER_MTX_SPIN);
    100  1.18     pooka 	rumpuser_cv_init(&thrcv);
    101  1.23     pooka 	TAILQ_INIT(&newthr);
    102  1.18     pooka }
    103  1.18     pooka 
    104  1.18     pooka void
    105  1.23     pooka rump_thread_allow(struct lwp *l)
    106  1.18     pooka {
    107  1.23     pooka 	struct thrdesc *td;
    108  1.18     pooka 
    109  1.18     pooka 	rumpuser_mutex_enter(thrmtx);
    110  1.23     pooka 	if (l == NULL) {
    111  1.23     pooka 		threads_are_go = true;
    112  1.23     pooka 	} else {
    113  1.23     pooka 		TAILQ_FOREACH(td, &newthr, entries) {
    114  1.23     pooka 			if (td->newlwp == l) {
    115  1.23     pooka 				td->runnable = 1;
    116  1.23     pooka 				break;
    117  1.23     pooka 			}
    118  1.23     pooka 		}
    119  1.23     pooka 	}
    120  1.18     pooka 	rumpuser_cv_broadcast(thrcv);
    121  1.18     pooka 	rumpuser_mutex_exit(thrmtx);
    122  1.18     pooka }
    123  1.18     pooka 
/*
 * Thread names that a rump kernel without threads (rump_threads == 0)
 * quietly declines to create instead of panicking; the corresponding
 * functionality is simply not available.  t_ncmp selects prefix
 * comparison (strncmp) instead of an exact name match.
 */
static struct {
	const char *t_name;
	bool t_ncmp;
} nothreads[] = {
	{ "vrele", false },
	{ "vdrain", false },
	{ "cachegc", false },
	{ "nfssilly", false },
	{ "unpgc", false },
	{ "pmf", true },
	{ "xcall", true },
};
    136  1.16     pooka 
    137   1.1     pooka int
    138   1.1     pooka kthread_create(pri_t pri, int flags, struct cpu_info *ci,
    139   1.1     pooka 	void (*func)(void *), void *arg, lwp_t **newlp, const char *fmt, ...)
    140   1.1     pooka {
    141   1.1     pooka 	char thrstore[MAXCOMLEN];
    142   1.1     pooka 	const char *thrname = NULL;
    143   1.1     pooka 	va_list ap;
    144  1.23     pooka 	struct thrdesc *td;
    145   1.1     pooka 	struct lwp *l;
    146   1.1     pooka 	int rv;
    147   1.1     pooka 
    148   1.1     pooka 	thrstore[0] = '\0';
    149   1.1     pooka 	if (fmt) {
    150   1.1     pooka 		va_start(ap, fmt);
    151   1.1     pooka 		vsnprintf(thrstore, sizeof(thrstore), fmt, ap);
    152   1.1     pooka 		va_end(ap);
    153   1.1     pooka 		thrname = thrstore;
    154   1.1     pooka 	}
    155   1.1     pooka 
    156   1.1     pooka 	/*
    157   1.1     pooka 	 * We don't want a module unload thread.
    158   1.1     pooka 	 * (XXX: yes, this is a kludge too, and the kernel should
    159   1.1     pooka 	 * have a more flexible method for configuring which threads
    160   1.1     pooka 	 * we want).
    161   1.1     pooka 	 */
    162   1.1     pooka 	if (strcmp(thrstore, "modunload") == 0) {
    163   1.1     pooka 		return 0;
    164   1.1     pooka 	}
    165   1.1     pooka 
    166   1.1     pooka 	if (!rump_threads) {
    167  1.16     pooka 		bool matched;
    168  1.16     pooka 		int i;
    169  1.16     pooka 
    170  1.16     pooka 		/* do we want to fake it? */
    171  1.16     pooka 		for (i = 0; i < __arraycount(nothreads); i++) {
    172  1.16     pooka 			if (nothreads[i].t_ncmp) {
    173  1.16     pooka 				matched = strncmp(thrstore, nothreads[i].t_name,
    174  1.16     pooka 				    strlen(nothreads[i].t_name)) == 0;
    175  1.16     pooka 			} else {
    176  1.16     pooka 				matched = strcmp(thrstore,
    177  1.16     pooka 				    nothreads[i].t_name) == 0;
    178  1.16     pooka 			}
    179  1.16     pooka 			if (matched) {
    180  1.16     pooka 				aprint_error("rump kernel threads not enabled, "
    181  1.16     pooka 				    "%s not functional\n", nothreads[i].t_name);
    182  1.16     pooka 				return 0;
    183  1.16     pooka 			}
    184  1.16     pooka 		}
    185  1.16     pooka 		panic("threads not available");
    186   1.1     pooka 	}
    187   1.1     pooka 	KASSERT(fmt != NULL);
    188   1.1     pooka 
    189  1.23     pooka 	/*
    190  1.23     pooka 	 * Allocate with intr-safe allocator, give that we may be
    191  1.23     pooka 	 * creating interrupt threads.
    192  1.23     pooka 	 */
    193  1.23     pooka 	td = kmem_intr_alloc(sizeof(*td), KM_SLEEP);
    194  1.23     pooka 	td->f = func;
    195  1.23     pooka 	td->arg = arg;
    196  1.23     pooka 	td->newlwp = l = rump__lwproc_alloclwp(&proc0);
    197   1.9     pooka 	l->l_flag |= LW_SYSTEM;
    198   1.1     pooka 	if (flags & KTHREAD_MPSAFE)
    199   1.1     pooka 		l->l_pflag |= LP_MPSAFE;
    200   1.2     pooka 	if (flags & KTHREAD_INTR)
    201   1.2     pooka 		l->l_pflag |= LP_INTR;
    202   1.4     pooka 	if (ci) {
    203   1.4     pooka 		l->l_pflag |= LP_BOUND;
    204   1.9     pooka 		l->l_target_cpu = ci;
    205   1.4     pooka 	}
    206   1.8     pooka 	if (thrname) {
    207   1.8     pooka 		l->l_name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
    208   1.8     pooka 		strlcpy(l->l_name, thrname, MAXCOMLEN);
    209   1.8     pooka 	}
    210   1.8     pooka 
    211  1.23     pooka 	rv = rumpuser_thread_create(threadbouncer, td, thrname,
    212  1.20     pooka 	    (flags & KTHREAD_MUSTJOIN) == KTHREAD_MUSTJOIN,
    213  1.20     pooka 	    pri, ci ? ci->ci_index : -1, &l->l_ctxlink);
    214   1.1     pooka 	if (rv)
    215  1.23     pooka 		return rv; /* XXX */
    216   1.1     pooka 
    217  1.10     pooka 	if (newlp) {
    218   1.1     pooka 		*newlp = l;
    219  1.10     pooka 	} else {
    220  1.15     rmind 		KASSERT((flags & KTHREAD_MUSTJOIN) == 0);
    221  1.10     pooka 	}
    222  1.10     pooka 
    223   1.1     pooka 	return 0;
    224   1.1     pooka }
    225   1.1     pooka 
/*
 * Terminate the calling kthread; never returns.  ecode is ignored
 * in this implementation.  The order below is strict: drop the
 * kernel lock, detach the lwp, give up the rump CPU, then exit the
 * host thread.
 */
void
kthread_exit(int ecode)
{

	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
		KERNEL_UNLOCK_LAST(NULL);
	rump_lwproc_releaselwp();
	/* unschedule includes membar */
	rump_unschedule();
	rumpuser_thread_exit();
}
    237  1.10     pooka 
/*
 * Wait for a joinable kthread (created with KTHREAD_MUSTJOIN) to
 * exit.  Returns the hypercall status from rumpuser_thread_join().
 */
int
kthread_join(struct lwp *l)
{
	int rv;

	/* l_ctxlink holds the host thread cookie set at creation time */
	KASSERT(l->l_ctxlink != NULL);
	rv = rumpuser_thread_join(l->l_ctxlink);
	/* make the exited thread's stores visible to us */
	membar_consumer();

	return rv;
}
    249  1.23     pooka 
    250  1.27  riastrad int
    251  1.27  riastrad kthread_fpu_enter(void)
    252  1.27  riastrad {
    253  1.27  riastrad 	struct lwp *l = curlwp;
    254  1.27  riastrad 	int s;
    255  1.27  riastrad 
    256  1.27  riastrad 	KASSERTMSG(l->l_flag & LW_SYSTEM,
    257  1.27  riastrad 	    "%s is allowed only in kthreads", __func__);
    258  1.27  riastrad 	s = l->l_flag & LW_SYSTEM_FPU;
    259  1.27  riastrad 	l->l_flag |= LW_SYSTEM_FPU;
    260  1.27  riastrad 
    261  1.27  riastrad 	return s;
    262  1.27  riastrad }
    263  1.27  riastrad 
    264  1.27  riastrad void
    265  1.27  riastrad kthread_fpu_exit(int s)
    266  1.27  riastrad {
    267  1.27  riastrad 	struct lwp *l = curlwp;
    268  1.27  riastrad 
    269  1.27  riastrad 	KASSERT(s == (s & LW_SYSTEM_FPU));
    270  1.27  riastrad 	KASSERTMSG(l->l_flag & LW_SYSTEM,
    271  1.27  riastrad 	    "%s is allowed only in kthreads", __func__);
    272  1.27  riastrad 	KASSERT(l->l_flag & LW_SYSTEM_FPU);
    273  1.27  riastrad 	l->l_flag ^= s ^ LW_SYSTEM_FPU;
    274  1.27  riastrad }
    275  1.27  riastrad 
    276  1.23     pooka /*
    277  1.23     pooka  * Create a non-kernel thread that is scheduled by a rump kernel hypercall.
    278  1.23     pooka  *
    279  1.23     pooka  * Sounds strange and out-of-place?  yup yup yup.  the original motivation
    280  1.23     pooka  * for this was aio.  This is a very infrequent code path in rump kernels.
    281  1.23     pooka  * XXX: threads created with lwp_create() are eternal for local clients.
    282  1.23     pooka  * however, they are correctly reaped for remote clients with process exit.
    283  1.23     pooka  */
    284  1.23     pooka static void *
    285  1.23     pooka lwpbouncer(void *arg)
    286  1.23     pooka {
    287  1.23     pooka 	struct thrdesc *td = arg;
    288  1.23     pooka 	struct lwp *l = td->newlwp;
    289  1.23     pooka 	void (*f)(void *);
    290  1.23     pooka 	void *thrarg;
    291  1.23     pooka 	int run;
    292  1.23     pooka 
    293  1.23     pooka 	f = td->f;
    294  1.23     pooka 	thrarg = td->arg;
    295  1.23     pooka 
    296  1.23     pooka 	/* do not run until we've been enqueued */
    297  1.23     pooka 	rumpuser_mutex_enter_nowrap(thrmtx);
    298  1.23     pooka 	while ((run = td->runnable) == 0) {
    299  1.23     pooka 		rumpuser_cv_wait_nowrap(thrcv, thrmtx);
    300  1.23     pooka 	}
    301  1.23     pooka 	rumpuser_mutex_exit(thrmtx);
    302  1.23     pooka 
    303  1.23     pooka 	/* schedule ourselves */
    304  1.23     pooka 	rump_lwproc_curlwp_set(l);
    305  1.23     pooka 	rump_schedule();
    306  1.23     pooka 	kmem_free(td, sizeof(*td));
    307  1.23     pooka 
    308  1.23     pooka 	/* should we just die instead? */
    309  1.23     pooka 	if (run == -1) {
    310  1.23     pooka 		rump_lwproc_releaselwp();
    311  1.23     pooka 		lwp_userret(l);
    312  1.23     pooka 		panic("lwpbouncer reached unreachable");
    313  1.23     pooka 	}
    314  1.23     pooka 
    315  1.23     pooka 	/* run, and don't come back! */
    316  1.23     pooka 	f(thrarg);
    317  1.23     pooka 	panic("lwp return from worker not supported");
    318  1.23     pooka }
    319  1.23     pooka 
/*
 * Create an lwp in process p2 running func(arg).  The lwp is backed
 * by a host thread which parks in lwpbouncer() until it is made
 * runnable with rump_thread_allow() or told to die with lwp_exit().
 *
 * Most parameters of the native kernel interface (stack, scheduling
 * class, signal mask/stack) are ignored here; any non-zero flags
 * cause a panic.
 */
int
lwp_create(struct lwp *l1, struct proc *p2, vaddr_t uaddr, int flags,
    void *stack, size_t stacksize, void (*func)(void *), void *arg,
    struct lwp **newlwpp, int sclass, const sigset_t *sigmask,
    const stack_t *sigstk)
{
	struct thrdesc *td;
	struct lwp *l;
	int rv;

	if (flags)
		panic("lwp_create: flags not supported by this implementation");
	td = kmem_alloc(sizeof(*td), KM_SLEEP);
	td->f = func;
	td->arg = arg;
	td->runnable = 0;	/* parked until rump_thread_allow() */
	td->newlwp = l = rump__lwproc_alloclwp(p2);

	/*
	 * Enqueue before creating the host thread so that
	 * rump_thread_allow()/lwp_exit() can already find the
	 * descriptor by lwp.
	 */
	rumpuser_mutex_enter_nowrap(thrmtx);
	TAILQ_INSERT_TAIL(&newthr, td, entries);
	rumpuser_mutex_exit(thrmtx);

	rv = rumpuser_thread_create(lwpbouncer, td, p2->p_comm, 0,
	    PRI_USER, -1, NULL);
	if (rv)
		panic("rumpuser_thread_create failed"); /* XXX */

	*newlwpp = l;
	return 0;
}
    350  1.23     pooka 
    351  1.23     pooka void
    352  1.23     pooka lwp_exit(struct lwp *l)
    353  1.23     pooka {
    354  1.23     pooka 	struct thrdesc *td;
    355  1.23     pooka 
    356  1.23     pooka 	rumpuser_mutex_enter_nowrap(thrmtx);
    357  1.23     pooka 	TAILQ_FOREACH(td, &newthr, entries) {
    358  1.23     pooka 		if (td->newlwp == l) {
    359  1.23     pooka 			td->runnable = -1;
    360  1.23     pooka 			break;
    361  1.23     pooka 		}
    362  1.23     pooka 	}
    363  1.23     pooka 	rumpuser_mutex_exit(thrmtx);
    364  1.23     pooka 
    365  1.23     pooka 	if (td == NULL)
    366  1.23     pooka 		panic("lwp_exit: could not find %p\n", l);
    367  1.23     pooka }
    368  1.23     pooka 
    369  1.23     pooka void
    370  1.23     pooka lwp_userret(struct lwp *l)
    371  1.23     pooka {
    372  1.23     pooka 
    373  1.23     pooka 	if ((l->l_flag & LW_RUMP_QEXIT) == 0)
    374  1.23     pooka 		return;
    375  1.23     pooka 
    376  1.23     pooka 	/* ok, so we should die */
    377  1.23     pooka 	rump_unschedule();
    378  1.23     pooka 	rumpuser_thread_exit();
    379  1.23     pooka }
void
lwp_need_userret(struct lwp *l)
{

	/*
	 * Nothing to do here.  NOTE(review): presumably rump lwps have
	 * no MD trap path to flag, and the QEXIT check happens in
	 * lwp_userret() itself -- confirm against the scheduler.
	 */
}
    387