/*	$NetBSD: subr_workqueue.c,v 1.17 2007/07/20 12:43:26 yamt Exp $	*/

/*-
 * Copyright (c)2002, 2005 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.17 2007/07/20 12:43:26 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/queue.h>

typedef struct work_impl {
	SIMPLEQ_ENTRY(work_impl) wk_entry;
} work_impl_t;

SIMPLEQ_HEAD(workqhead, work_impl);

struct workqueue_queue {
	kmutex_t q_mutex;
	kcondvar_t q_cv;
	struct workqhead q_queue;
	struct lwp *q_worker;
	struct cpu_info *q_ci;
	SLIST_ENTRY(workqueue_queue) q_list;
};

struct workqueue {
	SLIST_HEAD(, workqueue_queue) wq_queue;
	void (*wq_func)(struct work *, void *);
	void *wq_arg;
	const char *wq_name;
	pri_t wq_prio;
	ipl_cookie_t wq_ipl;
};

#define	POISON	0xaabbccdd

/*
 * Find the queue bound to "ci"; fall back to the first (for non-per-CPU
 * workqueues, the only) queue.
 */
static struct workqueue_queue *
workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci)
{
	struct workqueue_queue *q;

	SLIST_FOREACH(q, &wq->wq_queue, q_list)
		if (q->q_ci == ci)
			return q;

	return SLIST_FIRST(&wq->wq_queue);
}

static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	work_impl_t *wk;
	work_impl_t *next;

	/*
	 * Note that "list" is not a complete SIMPLEQ head: workqueue_run()
	 * copies only sqh_first into it, so sqh_last is invalid and the
	 * list may only be traversed, never appended to.
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
		next = SIMPLEQ_NEXT(wk, wk_entry);
		(*wq->wq_func)((void *)wk, wq->wq_arg);
	}
}

static void
workqueue_run(struct workqueue *wq)
{
	struct workqueue_queue *q;

	/* find the queue which this worker kthread services */
	q = workqueue_queue_lookup(wq, curlwp->l_cpu);
	KASSERT(q != NULL);

	for (;;) {
		struct workqhead tmp;

		/*
		 * We violate the SIMPLEQ abstraction here: only sqh_first
		 * is copied into "tmp", so its sqh_last is left invalid
		 * (poisoned under DIAGNOSTIC to catch misuse).
		 */

#if defined(DIAGNOSTIC)
		tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

		mutex_enter(&q->q_mutex);
		while (SIMPLEQ_EMPTY(&q->q_queue))
			cv_wait(&q->q_cv, &q->q_mutex);
		tmp.sqh_first = q->q_queue.sqh_first; /* XXX */
		SIMPLEQ_INIT(&q->q_queue);
		mutex_exit(&q->q_mutex);

		workqueue_runlist(wq, &tmp);
	}
}

static void
workqueue_worker(void *arg)
{
	struct workqueue *wq = arg;
	struct lwp *l;

	/* run this worker LWP at the priority requested at creation time */
	l = curlwp;
	lwp_lock(l);
	l->l_priority = wq->wq_prio;
	l->l_usrpri = wq->wq_prio;
	lwp_unlock(l);

	workqueue_run(wq);
}

static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl)
{

	wq->wq_ipl = makeiplcookie(ipl);
	wq->wq_prio = prio;
	wq->wq_name = name;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
	SLIST_INIT(&wq->wq_queue);
}

static int
workqueue_initqueue(struct workqueue *wq, int ipl,
    int flags, struct cpu_info *ci)
{
	struct workqueue_queue *q;
	int error, ktf;
	cpuid_t cpuid;

#ifdef MULTIPROCESSOR
	cpuid = ci->ci_cpuid;
#else
	cpuid = 0;
#endif

	q = kmem_alloc(sizeof(struct workqueue_queue), KM_SLEEP);
	SLIST_INSERT_HEAD(&wq->wq_queue, q, q_list);
	q->q_ci = ci;

	mutex_init(&q->q_mutex, MUTEX_DRIVER, ipl);
	cv_init(&q->q_cv, wq->wq_name);
	SIMPLEQ_INIT(&q->q_queue);
	ktf = ((flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0);
	error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
	    wq, &q->q_worker, "%s/%d", wq->wq_name, (int)cpuid);

	return error;
}

struct workqueue_exitargs {
	work_impl_t wqe_wk;
	struct workqueue_queue *wqe_q;
};

static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * The only competition at this point is workqueue_finiqueue(),
	 * which is waiting for q_worker to become NULL.
	 */

	KASSERT(q->q_worker == curlwp);
	mutex_enter(&q->q_mutex);
	q->q_worker = NULL;
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
	kthread_exit(0);
}

static void
workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q)
{
	struct workqueue_exitargs wqe;

	/* make the worker run workqueue_exit() and terminate itself */
	wq->wq_func = workqueue_exit;

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	KASSERT(q->q_worker != NULL);
	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
	cv_signal(&q->q_cv);
	while (q->q_worker != NULL) {
		cv_wait(&q->q_cv, &q->q_mutex);
	}
	mutex_exit(&q->q_mutex);
	mutex_destroy(&q->q_mutex);
	cv_destroy(&q->q_cv);
	kmem_free(q, sizeof(struct workqueue_queue));
}

/* --- */

int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    pri_t prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error = 0;

	KASSERT(sizeof(work_impl_t) <= sizeof(struct work));

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

#ifdef MULTIPROCESSOR
	if (flags & WQ_PERCPU) {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;

		/* create the work-queue for each CPU */
		for (CPU_INFO_FOREACH(cii, ci)) {
			error = workqueue_initqueue(wq, ipl, flags, ci);
			if (error)
				break;
		}
		if (error) {
			/* tear down the queues created so far */
			workqueue_destroy(wq);
			return error;
		}
	} else {
		error = workqueue_initqueue(wq, ipl, flags, curcpu());
		if (error) {
			kmem_free(wq, sizeof(*wq));
			return error;
		}
	}
#else
	error = workqueue_initqueue(wq, ipl, flags, curcpu());
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return error;
	}
#endif

	*wqp = wq;
	return 0;
}

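/*
 * Illustrative sketch, not part of this file: how a subsystem might create
 * and later tear down a workqueue with the interface above.  The names
 * example_wq, example_work, example_init and example_fini are hypothetical,
 * and the priority/IPL/flags values are placeholders, not recommendations;
 * see workqueue(9) for the authoritative usage.
 */
#if 0
static struct workqueue *example_wq;

static void example_work(struct work *, void *);

static int
example_init(void)
{

	/* a single worker thread (no WQ_PERCPU) */
	return workqueue_create(&example_wq, "examplewq", example_work,
	    NULL, PRI_NONE, IPL_NONE, 0);	/* placeholder prio/ipl/flags */
}

static void
example_fini(void)
{

	/*
	 * every enqueued work item must already have been processed;
	 * workqueue_finiqueue() asserts that the queue is empty.
	 */
	workqueue_destroy(example_wq);
	example_wq = NULL;
}
#endif
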
void
workqueue_destroy(struct workqueue *wq)
{
	struct workqueue_queue *q;

	while ((q = SLIST_FIRST(&wq->wq_queue)) != NULL) {
		SLIST_REMOVE_HEAD(&wq->wq_queue, q_list);
		workqueue_finiqueue(wq, q);
	}
	kmem_free(wq, sizeof(*wq));
}

void
workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci)
{
	struct workqueue_queue *q;
	work_impl_t *wk = (void *)wk0;

	q = workqueue_queue_lookup(wq, ci);
	KASSERT(q != NULL);

	mutex_enter(&q->q_mutex);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, wk, wk_entry);
	cv_signal(&q->q_cv);
	mutex_exit(&q->q_mutex);
}
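
/*
 * Illustrative sketch, not part of this file: the caller-side pattern that
 * the work_impl_t cast above relies on.  A request structure embeds
 * "struct work" as its first member so the callback can cast the pointer
 * back to the containing structure (compare workqueue_exitargs above).
 * The names example_request, example_work and example_submit are
 * hypothetical.
 */
#if 0
struct example_request {
	struct work er_work;	/* first member: permits the cast below */
	int er_data;
};

static void
example_work(struct work *wk, void *arg)
{
	struct example_request *req = (void *)wk;

	/* ... consume req->er_data ... */
	kmem_free(req, sizeof(*req));
}

static void
example_submit(struct workqueue *wq, int data)
{
	struct example_request *req;

	req = kmem_alloc(sizeof(*req), KM_SLEEP);
	req->er_data = data;
	/* NULL cpu_info: workqueue_queue_lookup() falls back to the first queue */
	workqueue_enqueue(wq, &req->er_work, NULL);
}
#endif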