/*	$NetBSD: subr_workqueue.c,v 1.33.30.1 2018/01/16 13:01:10 martin Exp $	*/

/*-
 * Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.33.30.1 2018/01/16 13:01:10 martin Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/queue.h>

typedef struct work_impl {
	SIMPLEQ_ENTRY(work_impl) wk_entry;
} work_impl_t;

SIMPLEQ_HEAD(workqhead, work_impl);

struct workqueue_queue {
	kmutex_t q_mutex;
	kcondvar_t q_cv;
	struct workqhead q_queue_pending;	/* enqueued, not yet picked up */
	struct workqhead q_queue_running;	/* batch being run by the worker */
	lwp_t *q_worker;			/* worker kthread for this queue */
	work_impl_t *q_waiter;		/* item a workqueue_wait() caller waits on */
};

struct workqueue {
	void (*wq_func)(struct work *, void *);
	void *wq_arg;
	int wq_flags;

	char wq_name[MAXCOMLEN];
	pri_t wq_prio;
	void *wq_ptr;		/* unaligned pointer returned by kmem_zalloc() */
};

#define	WQ_SIZE		(roundup2(sizeof(struct workqueue), coherency_unit))
#define	WQ_QUEUE_SIZE	(roundup2(sizeof(struct workqueue_queue), coherency_unit))

#define	POISON	0xaabbccdd

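/*
 * Layout note (descriptive only): a workqueue and its queue structures are
 * carved out of a single kmem_zalloc() allocation.  The struct workqueue,
 * rounded up to the coherency unit, is followed by one cache-line-aligned
 * struct workqueue_queue per CPU for WQ_PERCPU workqueues, or by a single
 * queue otherwise.  The extra coherency_unit in workqueue_size() pays for
 * the realignment done in workqueue_create(); the original allocation
 * pointer is remembered in wq_ptr for kmem_free().
 */
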
static size_t
workqueue_size(int flags)
{

	return WQ_SIZE
	    + ((flags & WQ_PERCPU) != 0 ? ncpu : 1) * WQ_QUEUE_SIZE
	    + coherency_unit;
}

static struct workqueue_queue *
workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci)
{
	u_int idx = 0;

	if (wq->wq_flags & WQ_PERCPU) {
		idx = ci ? cpu_index(ci) : cpu_index(curcpu());
	}

	return (void *)((uintptr_t)(wq) + WQ_SIZE + (idx * WQ_QUEUE_SIZE));
}

static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	work_impl_t *wk;
	work_impl_t *next;

	/*
	 * Note that "list" is not a complete SIMPLEQ: only sqh_first and
	 * the wk_entry links are valid (see workqueue_worker()).
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
		next = SIMPLEQ_NEXT(wk, wk_entry);
		(*wq->wq_func)((void *)wk, wq->wq_arg);
	}
}

static void
workqueue_worker(void *cookie)
{
	struct workqueue *wq = cookie;
	struct workqueue_queue *q;

	/* find the workqueue of this kthread */
	q = workqueue_queue_lookup(wq, curlwp->l_cpu);

	for (;;) {
		/*
		 * We violate the SIMPLEQ abstraction here: the pending
		 * list is handed over to q_queue_running by copying only
		 * its head pointer, so the running list's sqh_last is
		 * never valid.
		 */

		mutex_enter(&q->q_mutex);
		while (SIMPLEQ_EMPTY(&q->q_queue_pending))
			cv_wait(&q->q_cv, &q->q_mutex);
		KASSERT(SIMPLEQ_EMPTY(&q->q_queue_running));
		q->q_queue_running.sqh_first =
		    q->q_queue_pending.sqh_first; /* XXX */
		SIMPLEQ_INIT(&q->q_queue_pending);
		mutex_exit(&q->q_mutex);

		workqueue_runlist(wq, &q->q_queue_running);

		mutex_enter(&q->q_mutex);
		KASSERT(!SIMPLEQ_EMPTY(&q->q_queue_running));
		SIMPLEQ_INIT(&q->q_queue_running);
		if (__predict_false(q->q_waiter != NULL)) {
			/* Wake up workqueue_wait() */
			cv_signal(&q->q_cv);
		}
		mutex_exit(&q->q_mutex);
	}
}

static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl)
{

	strncpy(wq->wq_name, name, sizeof(wq->wq_name));

	wq->wq_prio = prio;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
}

static int
workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q,
    int ipl, struct cpu_info *ci)
{
	int error, ktf;

	KASSERT(q->q_worker == NULL);

	mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl);
	cv_init(&q->q_cv, wq->wq_name);
	SIMPLEQ_INIT(&q->q_queue_pending);
	SIMPLEQ_INIT(&q->q_queue_running);
	ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0);
	if (wq->wq_prio < PRI_KERNEL)
		ktf |= KTHREAD_TS;
	if (ci) {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index);
	} else {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s", wq->wq_name);
	}
	if (error != 0) {
		mutex_destroy(&q->q_mutex);
		cv_destroy(&q->q_cv);
		KASSERT(q->q_worker == NULL);
	}
	return error;
}

struct workqueue_exitargs {
	work_impl_t wqe_wk;
	struct workqueue_queue *wqe_q;
};

static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * The only competition at this point is workqueue_finiqueue(),
	 * which is waiting for q_worker to become NULL.
	 */

	KASSERT(q->q_worker == curlwp);
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending));
	mutex_enter(&q->q_mutex);
	q->q_worker = NULL;
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
	kthread_exit(0);
}

static void
workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q)
{
	struct workqueue_exitargs wqe;

	KASSERT(wq->wq_func == workqueue_exit);

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending));
	KASSERT(q->q_worker != NULL);
	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, &wqe.wqe_wk, wk_entry);
	cv_signal(&q->q_cv);
	while (q->q_worker != NULL) {
		cv_wait(&q->q_cv, &q->q_mutex);
	}
	mutex_exit(&q->q_mutex);
	mutex_destroy(&q->q_mutex);
	cv_destroy(&q->q_cv);
}

/* --- */

int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	struct workqueue_queue *q;
	void *ptr;
	int error = 0;

	CTASSERT(sizeof(work_impl_t) <= sizeof(struct work));

	ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP);
	wq = (void *)roundup2((uintptr_t)ptr, coherency_unit);
	wq->wq_ptr = ptr;
	wq->wq_flags = flags;

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create the work-queue for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			error = workqueue_initqueue(wq, q, ipl, ci);
			if (error) {
				break;
			}
		}
	} else {
		/* initialize a work-queue */
		q = workqueue_queue_lookup(wq, NULL);
		error = workqueue_initqueue(wq, q, ipl, NULL);
	}

	if (error != 0) {
		workqueue_destroy(wq);
	} else {
		*wqp = wq;
	}

	return error;
}

/*
 * Check whether wk_target is on this queue's pending or running list and,
 * if so, sleep until the worker has finished running it.  Returns true if
 * the work item was found on this queue.
 */
static bool
workqueue_q_wait(struct workqueue_queue *q, work_impl_t *wk_target)
{
	work_impl_t *wk;
	bool found = false;

	mutex_enter(&q->q_mutex);
    again:
	SIMPLEQ_FOREACH(wk, &q->q_queue_pending, wk_entry) {
		if (wk == wk_target)
			goto found;
	}
	SIMPLEQ_FOREACH(wk, &q->q_queue_running, wk_entry) {
		if (wk == wk_target)
			goto found;
	}
    found:
	if (wk != NULL) {
		found = true;
		KASSERT(q->q_waiter == NULL);
		q->q_waiter = wk;
		cv_wait(&q->q_cv, &q->q_mutex);
		goto again;
	}
	if (q->q_waiter != NULL)
		q->q_waiter = NULL;
	mutex_exit(&q->q_mutex);

	return found;
}

/*
 * Wait for a specified work item to finish.  The caller must ensure that
 * no new work will be enqueued before calling workqueue_wait.  Note that
 * if the workqueue is WQ_PERCPU, the caller may still enqueue new work to
 * queues other than the one being waited on.
 */
void
workqueue_wait(struct workqueue *wq, struct work *wk)
{
	struct workqueue_queue *q;
	bool found;

	if (ISSET(wq->wq_flags, WQ_PERCPU)) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			q = workqueue_queue_lookup(wq, ci);
			found = workqueue_q_wait(q, (work_impl_t *)wk);
			if (found)
				break;
		}
	} else {
		q = workqueue_queue_lookup(wq, NULL);
		(void) workqueue_q_wait(q, (work_impl_t *)wk);
	}
}

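/*
 * Illustrative usage sketch (not part of this file's build): one way a
 * caller might drive the interface above, respecting the workqueue_wait()
 * contract that no new work is enqueued while waiting.  The handler name,
 * priority, and IPL below are assumptions chosen for the example only.
 */
#if 0	/* example only */
static struct workqueue *example_wq;
static struct work example_wk;

static void
example_handler(struct work *wk, void *arg)
{
	/* runs once in the worker kthread for each enqueued work item */
}

static int
example_attach(void)
{
	int error;

	/* one global worker; pass WQ_PERCPU for a worker per CPU instead */
	error = workqueue_create(&example_wq, "example", example_handler,
	    NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE);
	if (error != 0)
		return error;
	workqueue_enqueue(example_wq, &example_wk, NULL);
	return 0;
}

static void
example_detach(void)
{
	/* drain the outstanding item, then tear the workqueue down */
	workqueue_wait(example_wq, &example_wk);
	workqueue_destroy(example_wq);
}
#endif	/* example only */
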
void
workqueue_destroy(struct workqueue *wq)
{
	struct workqueue_queue *q;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	wq->wq_func = workqueue_exit;
	for (CPU_INFO_FOREACH(cii, ci)) {
		q = workqueue_queue_lookup(wq, ci);
		if (q->q_worker != NULL) {
			workqueue_finiqueue(wq, q);
		}
	}
	kmem_free(wq->wq_ptr, workqueue_size(wq->wq_flags));
}

void
workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci)
{
	struct workqueue_queue *q;
	work_impl_t *wk = (void *)wk0;

	KASSERT(wq->wq_flags & WQ_PERCPU || ci == NULL);
	q = workqueue_queue_lookup(wq, ci);

	mutex_enter(&q->q_mutex);
	KASSERT(q->q_waiter == NULL);
	SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, wk, wk_entry);
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
}