/*	$NetBSD: kern_threadpool.c,v 1.14 2018/12/29 04:39:14 thorpej Exp $	*/

/*-
 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Thread pools.
 *
 * A thread pool is a collection of worker threads idle or running
 * jobs, together with an overseer thread that does not run jobs but
 * can be given jobs to assign to a worker thread.  Unlike
 * kthread_create, scheduling a job in a thread pool does not allocate
 * or even sleep at all, except perhaps on an adaptive lock.  Jobs
 * reuse threads, so they do not incur the expense of creating and
 * destroying kthreads unless there is little work to be done.
 *
 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
 * pools, one per CPU, each bound to that CPU.  For each priority
 * level in use, there is one shared unbound thread pool (i.e., a pool
 * of threads not bound to any CPU) and one shared per-CPU thread
 * pool.
 *
 * To use the unbound thread pool at priority pri, call
 * threadpool_get(&pool, pri).  When you're done, call
 * threadpool_put(pool, pri).
 *
 * To use the per-CPU thread pools at priority pri, call
 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread
 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another
 * CPU.  When you're done, call threadpool_percpu_put(pool_percpu,
 * pri).  An illustrative usage sketch follows this comment.
 *
 * +--MACHINE-----------------------------------------------+
 * | +--CPU 0-------+ +--CPU 1-------+     +--CPU n-------+ |
 * | | <overseer 0> | | <overseer 1> | ... | <overseer n> | |
 * | | <idle 0a>    | | <running 1a> | ... | <idle na>    | |
 * | | <running 0b> | | <running 1b> | ... | <idle nb>    | |
 * | | .            | | .            | ... | .            | |
 * | | .            | | .            | ... | .            | |
 * | | .            | | .            | ... | .            | |
 * | +--------------+ +--------------+     +--------------+ |
 * |            +--unbound---------+                        |
 * |            | <overseer n+1>   |                        |
 * |            | <idle (n+1)a>    |                        |
 * |            | <running (n+1)b> |                        |
 * |            +------------------+                        |
 * +--------------------------------------------------------+
 *
 * XXX Why one overseer per CPU?  I did that originally to avoid
 * touching remote CPUs' memory when scheduling a job, but that still
 * requires interprocessor synchronization.  Perhaps we could get by
 * with a single overseer thread, at the expense of another pointer in
 * struct threadpool_job to identify the CPU on which it must run
 * in order for the overseer to schedule it correctly.
 */
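
/*
 * Illustrative sketch (not compiled into the kernel): one way a
 * consumer might use an unbound pool, following the recipe above.
 * The names example_softc, example_work, example_attach,
 * example_kick, and example_detach are hypothetical; only the
 * threadpool_* and mutex primitives are taken from this file.  A
 * per-CPU consumer would call threadpool_percpu_get() and fetch the
 * current CPU's pool with threadpool_percpu_ref() instead of
 * threadpool_get().
 */
#if 0
struct example_softc {
	kmutex_t		sc_lock;	/* job interlock */
	struct threadpool	*sc_pool;
	struct threadpool_job	sc_job;
};

static void
example_work(struct threadpool_job *job)
{
	struct example_softc *sc =
	    container_of(job, struct example_softc, sc_job);

	/* ... perform the deferred work here ... */

	/* Mark the job idle again; must hold the job's lock. */
	mutex_enter(&sc->sc_lock);
	threadpool_job_done(job);
	mutex_exit(&sc->sc_lock);
}

static int
example_attach(struct example_softc *sc)
{
	int error;

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	error = threadpool_get(&sc->sc_pool, PRI_NONE);
	if (error)
		return error;
	threadpool_job_init(&sc->sc_job, example_work, &sc->sc_lock,
	    "examplejob");
	return 0;
}

static void
example_kick(struct example_softc *sc)
{

	/* Cheap and idempotent: a job already running stays running. */
	mutex_enter(&sc->sc_lock);
	threadpool_schedule_job(sc->sc_pool, &sc->sc_job);
	mutex_exit(&sc->sc_lock);
}

static void
example_detach(struct example_softc *sc)
{

	/* Cancel (waiting if it's already running), then tear down. */
	mutex_enter(&sc->sc_lock);
	threadpool_cancel_job(sc->sc_pool, &sc->sc_job);
	mutex_exit(&sc->sc_lock);
	threadpool_job_destroy(&sc->sc_job);
	threadpool_put(sc->sc_pool, PRI_NONE);
	mutex_destroy(&sc->sc_lock);
}
#endif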

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.14 2018/12/29 04:39:14 thorpej Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/threadpool.h>

/* Data structures */

TAILQ_HEAD(job_head, threadpool_job);
TAILQ_HEAD(thread_head, threadpool_thread);

struct threadpool_thread {
	struct lwp			*tpt_lwp;
	char				*tpt_lwp_savedname;
	struct threadpool		*tpt_pool;
	struct threadpool_job		*tpt_job;
	kcondvar_t			tpt_cv;
	TAILQ_ENTRY(threadpool_thread)	tpt_entry;
};

struct threadpool {
	kmutex_t			tp_lock;
	struct threadpool_thread	tp_overseer;
	struct job_head			tp_jobs;
	struct thread_head		tp_idle_threads;
	uint64_t			tp_refcnt;
	int				tp_flags;
#define	THREADPOOL_DYING	0x01
	struct cpu_info			*tp_cpu;
	pri_t				tp_pri;
};

static void	threadpool_hold(struct threadpool *);
static void	threadpool_rele(struct threadpool *);

static int	threadpool_percpu_create(struct threadpool_percpu **, pri_t);
static void	threadpool_percpu_destroy(struct threadpool_percpu *);

static threadpool_job_fn_t threadpool_job_dead;

static void	threadpool_job_hold(struct threadpool_job *);
static void	threadpool_job_rele(struct threadpool_job *);

static void	threadpool_overseer_thread(void *) __dead;
static void	threadpool_thread(void *) __dead;

static pool_cache_t	threadpool_thread_pc __read_mostly;

static kmutex_t		threadpools_lock __cacheline_aligned;

	/* Default to 30 second idle timeout for pool threads. */
static int	threadpool_idle_time_ms = 30 * 1000;

struct threadpool_unbound {
	struct threadpool		tpu_pool;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_unbound)	tpu_link;
	uint64_t			tpu_refcnt;
};

static LIST_HEAD(, threadpool_unbound) unbound_threadpools;

static struct threadpool_unbound *
threadpool_lookup_unbound(pri_t pri)
{
	struct threadpool_unbound *tpu;

	LIST_FOREACH(tpu, &unbound_threadpools, tpu_link) {
		if (tpu->tpu_pool.tp_pri == pri)
			return tpu;
	}
	return NULL;
}

static void
threadpool_insert_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == NULL);
	LIST_INSERT_HEAD(&unbound_threadpools, tpu, tpu_link);
}

static void
threadpool_remove_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == tpu);
	LIST_REMOVE(tpu, tpu_link);
}

struct threadpool_percpu {
	percpu_t *			tpp_percpu;
	pri_t				tpp_pri;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_percpu)	tpp_link;
	uint64_t			tpp_refcnt;
};

static LIST_HEAD(, threadpool_percpu) percpu_threadpools;

static struct threadpool_percpu *
threadpool_lookup_percpu(pri_t pri)
{
	struct threadpool_percpu *tpp;

	LIST_FOREACH(tpp, &percpu_threadpools, tpp_link) {
		if (tpp->tpp_pri == pri)
			return tpp;
	}
	return NULL;
}

static void
threadpool_insert_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == NULL);
	LIST_INSERT_HEAD(&percpu_threadpools, tpp, tpp_link);
}

static void
threadpool_remove_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == tpp);
	LIST_REMOVE(tpp, tpp_link);
}

#ifdef THREADPOOL_VERBOSE
#define	TP_LOG(x)		printf x
#else
#define	TP_LOG(x)		/* nothing */
#endif /* THREADPOOL_VERBOSE */

static int
sysctl_kern_threadpool_idle_ms(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	int val, error;

	node = *rnode;

	val = threadpool_idle_time_ms;
	node.sysctl_data = &val;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error == 0 && newp != NULL) {
		/* Disallow negative values and 0 (forever). */
		if (val < 1)
			error = EINVAL;
		else
			threadpool_idle_time_ms = val;
	}

	return error;
}

SYSCTL_SETUP_PROTO(sysctl_threadpool_setup);

SYSCTL_SETUP(sysctl_threadpool_setup,
    "sysctl kern.threadpool subtree setup")
{
	const struct sysctlnode *rnode, *cnode;
	int error __diagused;

	error = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "threadpool",
	    SYSCTL_DESCR("threadpool subsystem options"),
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	KASSERT(error == 0);

	error = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "idle_ms",
	    SYSCTL_DESCR("idle thread timeout in ms"),
	    sysctl_kern_threadpool_idle_ms, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	KASSERT(error == 0);
}
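
/*
 * The nodes created above expose the idle timeout as
 * kern.threadpool.idle_ms, so it can be tuned at run time, e.g.
 * (illustrative):
 *
 *	# sysctl -w kern.threadpool.idle_ms=10000
 *
 * The handler above rejects values below 1, since 0 would mean idle
 * threads never time out.
 */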

void
threadpools_init(void)
{

	threadpool_thread_pc =
	    pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0,
		"thplthrd", NULL, IPL_NONE, NULL, NULL, NULL);

	LIST_INIT(&unbound_threadpools);
	LIST_INIT(&percpu_threadpools);
	mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);
}

/* Thread pool creation */

static bool
threadpool_pri_is_valid(pri_t pri)
{
	return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

static int
threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
    pri_t pri)
{
	struct lwp *lwp;
	int ktflags;
	int error;

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
	/* XXX overseer */
	TAILQ_INIT(&pool->tp_jobs);
	TAILQ_INIT(&pool->tp_idle_threads);
	pool->tp_refcnt = 1;		/* overseer's reference */
	pool->tp_flags = 0;
	pool->tp_cpu = ci;
	pool->tp_pri = pri;

	pool->tp_overseer.tpt_lwp = NULL;
	pool->tp_overseer.tpt_pool = pool;
	pool->tp_overseer.tpt_job = NULL;
	cv_init(&pool->tp_overseer.tpt_cv, "poolover");

	ktflags = 0;
	ktflags |= KTHREAD_MPSAFE;
	if (pri < PRI_KERNEL)
		ktflags |= KTHREAD_TS;
	error = kthread_create(pri, ktflags, ci, &threadpool_overseer_thread,
	    &pool->tp_overseer, &lwp,
	    "pooloverseer/%d@%d", (ci ? cpu_index(ci) : -1), (int)pri);
	if (error)
		goto fail0;

	mutex_spin_enter(&pool->tp_lock);
	pool->tp_overseer.tpt_lwp = lwp;
	cv_broadcast(&pool->tp_overseer.tpt_cv);
	mutex_spin_exit(&pool->tp_lock);

	return 0;

fail0:	KASSERT(error);
	KASSERT(pool->tp_overseer.tpt_job == NULL);
	KASSERT(pool->tp_overseer.tpt_pool == pool);
	KASSERT(pool->tp_flags == 0);
	KASSERT(pool->tp_refcnt == 1);	/* overseer's reference, never dropped */
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
	cv_destroy(&pool->tp_overseer.tpt_cv);
	mutex_destroy(&pool->tp_lock);
	return error;
}

/* Thread pool destruction */

static void
threadpool_destroy(struct threadpool *pool)
{
	struct threadpool_thread *thread;

	/* Mark the pool dying and wait for threads to commit suicide.  */
	mutex_spin_enter(&pool->tp_lock);
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	pool->tp_flags |= THREADPOOL_DYING;
	cv_broadcast(&pool->tp_overseer.tpt_cv);
	TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
		cv_broadcast(&thread->tpt_cv);
	while (0 < pool->tp_refcnt) {
		TP_LOG(("%s: draining %" PRIu64 " references...\n", __func__,
		    pool->tp_refcnt));
		cv_wait(&pool->tp_overseer.tpt_cv, &pool->tp_lock);
	}
	mutex_spin_exit(&pool->tp_lock);

	KASSERT(pool->tp_overseer.tpt_job == NULL);
	KASSERT(pool->tp_overseer.tpt_pool == pool);
	KASSERT(pool->tp_flags == THREADPOOL_DYING);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
	cv_destroy(&pool->tp_overseer.tpt_cv);
	mutex_destroy(&pool->tp_lock);
}

static void
threadpool_hold(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	pool->tp_refcnt++;
	KASSERT(pool->tp_refcnt != 0);
}

static void
threadpool_rele(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	KASSERT(0 < pool->tp_refcnt);
	if (--pool->tp_refcnt == 0)
		cv_broadcast(&pool->tp_overseer.tpt_cv);
}

/* Unbound thread pools */

int
threadpool_get(struct threadpool **poolp, pri_t pri)
{
	struct threadpool_unbound *tpu, *tmp = NULL;
	int error;

	ASSERT_SLEEPABLE();

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	tpu = threadpool_lookup_unbound(pri);
	if (tpu == NULL) {
		mutex_exit(&threadpools_lock);
		TP_LOG(("%s: No pool for pri=%d, creating one.\n",
		    __func__, (int)pri));
		tmp = kmem_zalloc(sizeof(*tmp), KM_SLEEP);
		error = threadpool_create(&tmp->tpu_pool, NULL, pri);
		if (error) {
			kmem_free(tmp, sizeof(*tmp));
			return error;
		}
		mutex_enter(&threadpools_lock);
		tpu = threadpool_lookup_unbound(pri);
		if (tpu == NULL) {
			TP_LOG(("%s: Won the creation race for pri=%d.\n",
			    __func__, (int)pri));
			tpu = tmp;
			tmp = NULL;
			threadpool_insert_unbound(tpu);
		}
	}
	KASSERT(tpu != NULL);
	tpu->tpu_refcnt++;
	KASSERT(tpu->tpu_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL) {
		threadpool_destroy(&tmp->tpu_pool);
		kmem_free(tmp, sizeof(*tmp));
	}
	KASSERT(tpu != NULL);
	*poolp = &tpu->tpu_pool;
	return 0;
}

void
threadpool_put(struct threadpool *pool, pri_t pri)
{
	struct threadpool_unbound *tpu =
	    container_of(pool, struct threadpool_unbound, tpu_pool);

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_enter(&threadpools_lock);
	KASSERT(tpu == threadpool_lookup_unbound(pri));
	KASSERT(0 < tpu->tpu_refcnt);
	if (--tpu->tpu_refcnt == 0) {
		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
		    __func__, (int)pri));
		threadpool_remove_unbound(tpu);
	} else {
		tpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (tpu) {
		threadpool_destroy(&tpu->tpu_pool);
		kmem_free(tpu, sizeof(*tpu));
	}
}

/* Per-CPU thread pools */

int
threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu, *tmp = NULL;
	int error;

	ASSERT_SLEEPABLE();

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	pool_percpu = threadpool_lookup_percpu(pri);
	if (pool_percpu == NULL) {
		mutex_exit(&threadpools_lock);
		TP_LOG(("%s: No pool for pri=%d, creating one.\n",
		    __func__, (int)pri));
		error = threadpool_percpu_create(&tmp, pri);
		if (error)
			return error;
		KASSERT(tmp != NULL);
		mutex_enter(&threadpools_lock);
		pool_percpu = threadpool_lookup_percpu(pri);
		if (pool_percpu == NULL) {
			TP_LOG(("%s: Won the creation race for pri=%d.\n",
			    __func__, (int)pri));
			pool_percpu = tmp;
			tmp = NULL;
			threadpool_insert_percpu(pool_percpu);
		}
	}
	KASSERT(pool_percpu != NULL);
	pool_percpu->tpp_refcnt++;
	KASSERT(pool_percpu->tpp_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL)
		threadpool_percpu_destroy(tmp);
	KASSERT(pool_percpu != NULL);
	*pool_percpup = pool_percpu;
	return 0;
}

void
threadpool_percpu_put(struct threadpool_percpu *pool_percpu, pri_t pri)
{

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_enter(&threadpools_lock);
	KASSERT(pool_percpu == threadpool_lookup_percpu(pri));
	KASSERT(0 < pool_percpu->tpp_refcnt);
	if (--pool_percpu->tpp_refcnt == 0) {
		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
		    __func__, (int)pri));
		threadpool_remove_percpu(pool_percpu);
	} else {
		pool_percpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (pool_percpu)
		threadpool_percpu_destroy(pool_percpu);
}

struct threadpool *
threadpool_percpu_ref(struct threadpool_percpu *pool_percpu)
{
	struct threadpool **poolp, *pool;

	poolp = percpu_getref(pool_percpu->tpp_percpu);
	pool = *poolp;
	percpu_putref(pool_percpu->tpp_percpu);

	return pool;
}

struct threadpool *
threadpool_percpu_ref_remote(struct threadpool_percpu *pool_percpu,
    struct cpu_info *ci)
{
	struct threadpool **poolp, *pool;

	percpu_traverse_enter();
	poolp = percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
	pool = *poolp;
	percpu_traverse_exit();

	return pool;
}

static int
threadpool_percpu_create(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	unsigned int i, j;
	int error;

	pool_percpu = kmem_zalloc(sizeof(*pool_percpu), KM_SLEEP);
	if (pool_percpu == NULL) {
		error = ENOMEM;
		goto fail0;
	}
	pool_percpu->tpp_pri = pri;

	pool_percpu->tpp_percpu = percpu_alloc(sizeof(struct threadpool *));
	if (pool_percpu->tpp_percpu == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	for (i = 0, CPU_INFO_FOREACH(cii, ci), i++) {
		struct threadpool *pool;

		pool = kmem_zalloc(sizeof(*pool), KM_SLEEP);
		error = threadpool_create(pool, ci, pri);
		if (error) {
			kmem_free(pool, sizeof(*pool));
			goto fail2;
		}
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		*poolp = pool;
		percpu_traverse_exit();
	}

	/* Success!  */
	*pool_percpup = (struct threadpool_percpu *)pool_percpu;
	return 0;

fail2:	for (j = 0, CPU_INFO_FOREACH(cii, ci), j++) {
		if (i <= j)
			break;
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		struct threadpool *const pool = *poolp;
		percpu_traverse_exit();
		threadpool_destroy(pool);
		kmem_free(pool, sizeof(*pool));
	}
	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
fail1:	kmem_free(pool_percpu, sizeof(*pool_percpu));
fail0:	return error;
}

static void
threadpool_percpu_destroy(struct threadpool_percpu *pool_percpu)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		struct threadpool *const pool = *poolp;
		percpu_traverse_exit();
		threadpool_destroy(pool);
		kmem_free(pool, sizeof(*pool));
	}

	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
	kmem_free(pool_percpu, sizeof(*pool_percpu));
}

/* Thread pool jobs */

void __printflike(4,5)
threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn,
    kmutex_t *lock, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(job->job_name, sizeof(job->job_name), fmt, ap);
	va_end(ap);

	job->job_lock = lock;
	job->job_thread = NULL;
	job->job_refcnt = 0;
	cv_init(&job->job_cv, job->job_name);
	job->job_fn = fn;
}

static void
threadpool_job_dead(struct threadpool_job *job)
{

	panic("threadpool job %p ran after destruction", job);
}

void
threadpool_job_destroy(struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((job->job_thread == NULL), "job %p still running", job);

	mutex_enter(job->job_lock);
	while (0 < job->job_refcnt)
		cv_wait(&job->job_cv, job->job_lock);
	mutex_exit(job->job_lock);

	job->job_lock = NULL;
	KASSERT(job->job_thread == NULL);
	KASSERT(job->job_refcnt == 0);
	KASSERT(!cv_has_waiters(&job->job_cv));
	cv_destroy(&job->job_cv);
	job->job_fn = threadpool_job_dead;
	(void)strlcpy(job->job_name, "deadjob", sizeof(job->job_name));
}

static void
threadpool_job_hold(struct threadpool_job *job)
{
	unsigned int refcnt;

	do {
		refcnt = job->job_refcnt;
		KASSERT(refcnt != UINT_MAX);
	} while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt + 1))
	    != refcnt);
}

static void
threadpool_job_rele(struct threadpool_job *job)
{
	unsigned int refcnt;

	KASSERT(mutex_owned(job->job_lock));

	do {
		refcnt = job->job_refcnt;
		KASSERT(0 < refcnt);
		if (refcnt == 1) {
			refcnt = atomic_dec_uint_nv(&job->job_refcnt);
			KASSERT(refcnt != UINT_MAX);
			if (refcnt == 0)
				cv_broadcast(&job->job_cv);
			return;
		}
	} while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt - 1))
	    != refcnt);
}

void
threadpool_job_done(struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));
	KASSERT(job->job_thread != NULL);
	KASSERT(job->job_thread->tpt_lwp == curlwp);

	/*
	 * We can safely read this field; it's only modified right before
	 * we call the job work function, and we are only preserving it
	 * to use here; no one cares if it contains junk afterward.
	 */
	lwp_lock(curlwp);
	curlwp->l_name = job->job_thread->tpt_lwp_savedname;
	lwp_unlock(curlwp);

	/*
	 * Inline the work of threadpool_job_rele(); the job is already
	 * locked, the most likely scenario (XXXJRT only scenario?) is
	 * that we're dropping the last reference (the one taken in
	 * threadpool_schedule_job()), and we always do the cv_broadcast()
	 * anyway.
	 */
	KASSERT(0 < job->job_refcnt);
	unsigned int refcnt __diagused = atomic_dec_uint_nv(&job->job_refcnt);
	KASSERT(refcnt != UINT_MAX);
	cv_broadcast(&job->job_cv);
	job->job_thread = NULL;
}

void
threadpool_schedule_job(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * If the job's already running, let it keep running.  The job
	 * is guaranteed by the interlock not to end early -- if it had
	 * ended early, threadpool_job_done would have set job_thread
	 * to NULL under the interlock.
	 */
	if (__predict_true(job->job_thread != NULL)) {
		TP_LOG(("%s: job '%s' already running.\n",
		    __func__, job->job_name));
		return;
	}

	threadpool_job_hold(job);

	/* Otherwise, try to assign a thread to the job.  */
	mutex_spin_enter(&pool->tp_lock);
	if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
		/* Nobody's idle.  Give it to the overseer.  */
		TP_LOG(("%s: giving job '%s' to overseer.\n",
		    __func__, job->job_name));
		job->job_thread = &pool->tp_overseer;
		TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
	} else {
		/* Assign it to the first idle thread.  */
		job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
		TP_LOG(("%s: giving job '%s' to idle thread %p.\n",
		    __func__, job->job_name, job->job_thread));
		TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
		    tpt_entry);
		job->job_thread->tpt_job = job;
	}

	/* Notify whomever we gave it to, overseer or idle thread.  */
	KASSERT(job->job_thread != NULL);
	cv_broadcast(&job->job_thread->tpt_cv);
	mutex_spin_exit(&pool->tp_lock);
}

bool
threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * XXXJRT This fails (albeit safely) when all of the following
	 * are true:
	 *
	 *	=> "pool" is something other than what the job was
	 *	   scheduled on.  This can legitimately occur if,
	 *	   for example, a job is percpu-scheduled on CPU0
	 *	   and then CPU1 attempts to cancel it without taking
	 *	   a remote pool reference.  (this might happen by
	 *	   "luck of the draw").
	 *
	 *	=> "job" is not yet running, but is assigned to the
	 *	   overseer.
	 *
	 * When this happens, this code makes the determination that
	 * the job is already running.  The failure mode is that the
	 * caller is told the job is running, and thus has to wait.
	 * The overseer will eventually get to it and the job will
	 * proceed as if it had been already running.
	 */

	if (job->job_thread == NULL) {
		/* Nothing to do.  Guaranteed not running.  */
		return true;
	} else if (job->job_thread == &pool->tp_overseer) {
		/* Take it off the list to guarantee it won't run.  */
		job->job_thread = NULL;
		mutex_spin_enter(&pool->tp_lock);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		mutex_spin_exit(&pool->tp_lock);
		threadpool_job_rele(job);
		return true;
	} else {
		/* Too late -- already running.  */
		return false;
	}
}

void
threadpool_cancel_job(struct threadpool *pool, struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERT(mutex_owned(job->job_lock));

	if (threadpool_cancel_job_async(pool, job))
		return;

	/* Already running.  Wait for it to complete.  */
	while (job->job_thread != NULL)
		cv_wait(&job->job_cv, job->job_lock);
}

/* Thread pool overseer thread */

static void __dead
threadpool_overseer_thread(void *arg)
{
	struct threadpool_thread *const overseer = arg;
	struct threadpool *const pool = overseer->tpt_pool;
	struct lwp *lwp = NULL;
	int ktflags;
	int error;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

	/* Wait until we're initialized.  */
	mutex_spin_enter(&pool->tp_lock);
	while (overseer->tpt_lwp == NULL)
		cv_wait(&overseer->tpt_cv, &pool->tp_lock);

	TP_LOG(("%s: starting.\n", __func__));

	for (;;) {
		/* Wait until there's a job.  */
		while (TAILQ_EMPTY(&pool->tp_jobs)) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				TP_LOG(("%s: THREADPOOL_DYING\n",
				    __func__));
				break;
			}
			cv_wait(&overseer->tpt_cv, &pool->tp_lock);
		}
		if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
			break;

		/* If there are no threads, we'll have to try to start one.  */
		if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
			TP_LOG(("%s: Got a job, need to create a thread.\n",
			    __func__));
			threadpool_hold(pool);
			mutex_spin_exit(&pool->tp_lock);

			struct threadpool_thread *const thread =
			    pool_cache_get(threadpool_thread_pc, PR_WAITOK);
			thread->tpt_lwp = NULL;
			thread->tpt_pool = pool;
			thread->tpt_job = NULL;
			cv_init(&thread->tpt_cv, "poolthrd");

			ktflags = 0;
			ktflags |= KTHREAD_MPSAFE;
			if (pool->tp_pri < PRI_KERNEL)
				ktflags |= KTHREAD_TS;
			error = kthread_create(pool->tp_pri, ktflags,
			    pool->tp_cpu, &threadpool_thread, thread, &lwp,
			    "poolthread/%d@%d",
			    (pool->tp_cpu ? cpu_index(pool->tp_cpu) : -1),
			    (int)pool->tp_pri);

			mutex_spin_enter(&pool->tp_lock);
			if (error) {
				pool_cache_put(threadpool_thread_pc, thread);
				threadpool_rele(pool);
				/* XXX What to do to wait for memory?  */
				(void)kpause("thrdplcr", false, hz,
				    &pool->tp_lock);
				continue;
			}
			/*
			 * New kthread now owns the reference to the pool
			 * taken above.
			 */
			KASSERT(lwp != NULL);
			TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread,
			    tpt_entry);
			thread->tpt_lwp = lwp;
			lwp = NULL;
			cv_broadcast(&thread->tpt_cv);
			continue;
		}

		/* There are idle threads, so try giving one a job.  */
		struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		/*
		 * Take an extra reference on the job temporarily so that
		 * it won't disappear on us while we have both locks dropped.
		 */
		threadpool_job_hold(job);
		mutex_spin_exit(&pool->tp_lock);

		mutex_enter(job->job_lock);
		/* If the job was cancelled, we'll no longer be its thread.  */
		if (__predict_true(job->job_thread == overseer)) {
			mutex_spin_enter(&pool->tp_lock);
			if (__predict_false(
				    TAILQ_EMPTY(&pool->tp_idle_threads))) {
				/*
				 * Someone else snagged the thread
				 * first.  We'll have to try again.
				 */
				TP_LOG(("%s: '%s' lost race to use idle thread.\n",
				    __func__, job->job_name));
				TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
				    job_entry);
			} else {
				/*
				 * Assign the job to the thread and
				 * wake the thread so it starts work.
				 */
				struct threadpool_thread *const thread =
				    TAILQ_FIRST(&pool->tp_idle_threads);

				TP_LOG(("%s: '%s' gets thread %p\n",
				    __func__, job->job_name, thread));
				KASSERT(thread->tpt_job == NULL);
				TAILQ_REMOVE(&pool->tp_idle_threads, thread,
				    tpt_entry);
				thread->tpt_job = job;
				job->job_thread = thread;
				cv_broadcast(&thread->tpt_cv);
			}
			mutex_spin_exit(&pool->tp_lock);
		}
		threadpool_job_rele(job);
		mutex_exit(job->job_lock);

		mutex_spin_enter(&pool->tp_lock);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	TP_LOG(("%s: exiting.\n", __func__));

	kthread_exit(0);
}

/* Thread pool thread */

static void __dead
threadpool_thread(void *arg)
{
	struct threadpool_thread *const thread = arg;
	struct threadpool *const pool = thread->tpt_pool;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

	/* Wait until we're initialized and on the queue.  */
	mutex_spin_enter(&pool->tp_lock);
	while (thread->tpt_lwp == NULL)
		cv_wait(&thread->tpt_cv, &pool->tp_lock);

	TP_LOG(("%s: starting.\n", __func__));

	KASSERT(thread->tpt_lwp == curlwp);
	for (;;) {
		/* Wait until we are assigned a job.  */
		while (thread->tpt_job == NULL) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				TP_LOG(("%s: THREADPOOL_DYING\n",
				    __func__));
				break;
			}
			if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
				mstohz(threadpool_idle_time_ms)))
				break;
		}
		if (__predict_false(thread->tpt_job == NULL)) {
			TAILQ_REMOVE(&pool->tp_idle_threads, thread,
			    tpt_entry);
			break;
		}

		struct threadpool_job *const job = thread->tpt_job;
		KASSERT(job != NULL);

		/* Set our lwp name to reflect what job we're doing.  */
		lwp_lock(curlwp);
		char *const lwp_name __diagused = curlwp->l_name;
		thread->tpt_lwp_savedname = curlwp->l_name;
		curlwp->l_name = job->job_name;
		lwp_unlock(curlwp);

		mutex_spin_exit(&pool->tp_lock);

		TP_LOG(("%s: running job '%s' on thread %p.\n",
		    __func__, job->job_name, thread));

		/* Run the job.  */
		(*job->job_fn)(job);

		/* lwp name restored in threadpool_job_done(). */
		KASSERTMSG((curlwp->l_name == lwp_name),
		    "someone forgot to call threadpool_job_done()!");

		/*
		 * We can compare pointers, but we can no longer dereference
		 * job after this because threadpool_job_done() drops the
		 * last reference on the job while the job is locked.
		 */

		mutex_spin_enter(&pool->tp_lock);
		KASSERT(thread->tpt_job == job);
		thread->tpt_job = NULL;
		TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	TP_LOG(("%s: thread %p exiting.\n", __func__, thread));

	KASSERT(!cv_has_waiters(&thread->tpt_cv));
	cv_destroy(&thread->tpt_cv);
	pool_cache_put(threadpool_thread_pc, thread);
	kthread_exit(0);
}