/*	$NetBSD: subr_workqueue.c,v 1.7 2006/11/01 10:17:59 yamt Exp $	*/

/*-
 * Copyright (c)2002, 2005 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.7 2006/11/01 10:17:59 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/workqueue.h>

SIMPLEQ_HEAD(workqhead, work);

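/*
 * Per-queue state: the simple lock protecting the queue (and the IPL
 * saved while it is held), the list of pending work, and the worker
 * thread that services it.
 */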
struct workqueue_queue {
	struct simplelock q_lock;
	int q_savedipl;
	struct workqhead q_queue;
	struct proc *q_worker;
};

struct workqueue {
	struct workqueue_queue wq_queue; /* todo: make this per-cpu */

	void (*wq_func)(struct work *, void *);
	void *wq_arg;
	const char *wq_name;
	int wq_prio;
	int wq_ipl;
};

#define	POISON	0xaabbccdd

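/*
 * workqueue_lock/workqueue_unlock: block interrupts and take the per-queue
 * simple lock.  The previous IPL is stashed in q_savedipl and restored on
 * unlock; splhigh() is used as a stopgap until splraiseipl(wq->wq_ipl)
 * can be used.
 */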
static void
workqueue_lock(struct workqueue *wq, struct workqueue_queue *q)
{
	int s;

#if 0 /* notyet */
	s = splraiseipl(wq->wq_ipl);
#else
	s = splhigh(); /* XXX */
#endif
	simple_lock(&q->q_lock);
	q->q_savedipl = s;
}

static void
workqueue_unlock(struct workqueue *wq, struct workqueue_queue *q)
{
	int s = q->q_savedipl;

	simple_unlock(&q->q_lock);
	splx(s);
}

static void
workqueue_runlist(struct workqueue *wq, struct workqhead *list)
{
	struct work *wk;
	struct work *next;

	/*
	 * note that "list" is not a complete SIMPLEQ: only sqh_first is
	 * valid (workqueue_run detaches the queue by copying just that
	 * member), so walk it with SIMPLEQ_FIRST/SIMPLEQ_NEXT only.
	 */

	for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) {
		next = SIMPLEQ_NEXT(wk, wk_entry);
		(*wq->wq_func)(wk, wq->wq_arg);
	}
}

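/*
 * workqueue_run: main loop of the worker thread.  Sleep until work is
 * queued, detach the whole pending list while holding the queue lock,
 * then run the callbacks with the lock dropped.
 */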
static void
workqueue_run(struct workqueue *wq)
{
	struct workqueue_queue *q = &wq->wq_queue;

	for (;;) {
		struct workqhead tmp;
		int error;

		/*
		 * we violate the SIMPLEQ abstraction here: the pending
		 * list is detached by copying sqh_first only, so "tmp"
		 * never has a valid sqh_last (poisoned under DIAGNOSTIC
		 * to catch misuse).
		 */

#if defined(DIAGNOSTIC)
		tmp.sqh_last = (void *)POISON;
#endif /* defined(DIAGNOSTIC) */

		workqueue_lock(wq, q);
		while (SIMPLEQ_EMPTY(&q->q_queue)) {
			error = ltsleep(q, wq->wq_prio, wq->wq_name, 0,
			    &q->q_lock);
			if (error) {
				panic("%s: %s error=%d",
				    __func__, wq->wq_name, error);
			}
		}
		tmp.sqh_first = q->q_queue.sqh_first; /* XXX */
		SIMPLEQ_INIT(&q->q_queue);
		workqueue_unlock(wq, q);

		workqueue_runlist(wq, &tmp);
	}
}

static void
workqueue_worker(void *arg)
{
	struct workqueue *wq = arg;

	workqueue_run(wq);
}

static void
workqueue_init(struct workqueue *wq, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    int prio, int ipl)
{

	wq->wq_ipl = ipl;
	wq->wq_prio = prio;
	wq->wq_name = name;
	wq->wq_func = callback_func;
	wq->wq_arg = callback_arg;
}

static int
workqueue_initqueue(struct workqueue *wq)
{
	struct workqueue_queue *q = &wq->wq_queue;
	int error;

	simple_lock_init(&q->q_lock);
	SIMPLEQ_INIT(&q->q_queue);
	error = kthread_create1(workqueue_worker, wq, &q->q_worker,
	    wq->wq_name);

	return error;
}

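/*
 * Queue destruction protocol: workqueue_finiqueue() points wq_func at
 * workqueue_exit() and enqueues a workqueue_exitargs as the final work
 * item.  The worker picks it up, clears q_worker, wakes the destroyer
 * and exits; workqueue_finiqueue() sleeps until q_worker is NULL.
 */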
struct workqueue_exitargs {
	struct work wqe_wk;
	struct workqueue_queue *wqe_q;
};

static void
workqueue_exit(struct work *wk, void *arg)
{
	struct workqueue_exitargs *wqe = (void *)wk;
	struct workqueue_queue *q = wqe->wqe_q;

	/*
	 * no need to raise ipl because the only competition at this
	 * point is workqueue_finiqueue.
	 */

	KASSERT(q->q_worker == curproc);
	simple_lock(&q->q_lock);
	q->q_worker = NULL;
	simple_unlock(&q->q_lock);
	wakeup(q);
	kthread_exit(0);
}

static void
workqueue_finiqueue(struct workqueue *wq)
{
	struct workqueue_queue *q = &wq->wq_queue;
	struct workqueue_exitargs wqe;

	wq->wq_func = workqueue_exit;

	wqe.wqe_q = q;
	KASSERT(SIMPLEQ_EMPTY(&q->q_queue));
	KASSERT(q->q_worker != NULL);
	workqueue_lock(wq, q);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, &wqe.wqe_wk, wk_entry);
	wakeup(q);
	while (q->q_worker != NULL) {
		int error;

		error = ltsleep(q, wq->wq_prio, "wqfini", 0, &q->q_lock);
		if (error) {
			panic("%s: %s error=%d",
			    __func__, wq->wq_name, error);
		}
	}
	workqueue_unlock(wq, q);
}

/* --- */

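/*
 * workqueue_create: allocate a workqueue and start its worker thread.
 * The callback runs in that thread at priority "prio".  "ipl" is meant
 * to name the interrupt level the queue is used from (see the "notyet"
 * splraiseipl() in workqueue_lock()); for now enqueueing simply blocks
 * all interrupts.
 */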
int
workqueue_create(struct workqueue **wqp, const char *name,
    void (*callback_func)(struct work *, void *), void *callback_arg,
    int prio, int ipl, int flags)
{
	struct workqueue *wq;
	int error;

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);
	if (wq == NULL) {
		return ENOMEM;
	}

	workqueue_init(wq, name, callback_func, callback_arg, prio, ipl);

	error = workqueue_initqueue(wq);
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return error;
	}

	*wqp = wq;
	return 0;
}

void
workqueue_destroy(struct workqueue *wq)
{

	workqueue_finiqueue(wq);
	kmem_free(wq, sizeof(*wq));
}

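/*
 * workqueue_enqueue: append a caller-owned struct work to the queue and,
 * if the queue was empty, wake the worker.  The struct work is linked
 * through wk_entry, so it must stay valid until the callback has run.
 */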
void
workqueue_enqueue(struct workqueue *wq, struct work *wk)
{
	struct workqueue_queue *q = &wq->wq_queue;
	boolean_t wasempty;

	workqueue_lock(wq, q);
	wasempty = SIMPLEQ_EMPTY(&q->q_queue);
	SIMPLEQ_INSERT_TAIL(&q->q_queue, wk, wk_entry);
	workqueue_unlock(wq, q);

	if (wasempty) {
		wakeup(q);
	}
}
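
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * "example" and "req" names below are hypothetical).  A driver embeds a
 * struct work in each request and defers processing to thread context:
 *
 *	static struct workqueue *example_wq;
 *
 *	static void
 *	example_work(struct work *wk, void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		... process one deferred request in thread context ...
 *	}
 *
 *	error = workqueue_create(&example_wq, "examplewq", example_work,
 *	    sc, PRIBIO, IPL_BIO, 0);
 *
 *	later, e.g. from the interrupt handler:
 *		workqueue_enqueue(example_wq, &req->rq_work);
 *
 *	on detach, once no more work will be enqueued:
 *		workqueue_destroy(example_wq);
 */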