/*	$NetBSD: linux_work.c,v 1.5 2018/08/27 07:05:39 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.5 2018/08/27 07:05:39 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/cpu.h>

#include <machine/lock.h>

#include <linux/workqueue.h>

/* XXX Kludge until we sync with HEAD. */
#if DIAGNOSTIC
#define __diagused
#else
#define __diagused __unused
#endif

struct workqueue_struct {
	struct workqueue *wq_workqueue;

	struct rb_node wq_node;
	struct lwp *wq_lwp;

	/* XXX The following should all be per-CPU. */
	kmutex_t wq_lock;

	/*
	 * Condvar for when any state related to this workqueue
	 * changes.  XXX Could split this into multiple condvars for
	 * different purposes, but whatever...
	 */
	kcondvar_t wq_cv;

	TAILQ_HEAD(, delayed_work) wq_delayed;
	struct work_struct *wq_current_work;
};

static void linux_work_lock_init(struct work_struct *);
static void linux_work_lock(struct work_struct *);
static void linux_work_unlock(struct work_struct *);
static bool linux_work_locked(struct work_struct *) __diagused;

static void linux_wq_barrier(struct work_struct *);

static void linux_wait_for_cancelled_work(struct work_struct *);
static void linux_wait_for_invoked_work(struct work_struct *);
static void linux_worker(struct work *, void *);

static void linux_cancel_delayed_work_callout(struct delayed_work *, bool);
static void linux_wait_for_delayed_cancelled_work(struct delayed_work *);
static void linux_worker_intr(void *);

struct workqueue_struct *system_wq;
struct workqueue_struct *system_long_wq;

static struct {
	kmutex_t lock;
	struct rb_tree tree;
} workqueues __cacheline_aligned;

static const rb_tree_ops_t workqueues_rb_ops;

int
linux_workqueue_init(void)
{

	mutex_init(&workqueues.lock, MUTEX_DEFAULT, IPL_VM);
	rb_tree_init(&workqueues.tree, &workqueues_rb_ops);

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL)
		goto fail0;

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL)
		goto fail1;

	return 0;

fail2: __unused
	destroy_workqueue(system_long_wq);
fail1:	destroy_workqueue(system_wq);
fail0:	mutex_destroy(&workqueues.lock);
	return ENOMEM;
}

void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_long_wq);
	system_long_wq = NULL;
	destroy_workqueue(system_wq);
	system_wq = NULL;
	KASSERT(RB_TREE_MIN(&workqueues.tree) == NULL);
	mutex_destroy(&workqueues.lock);
}

/*
 * Table of workqueue LWPs for validation -- assumes there is only one
 * thread per workqueue.
 *
 * XXX Mega-kludgerific!
 */

static int
compare_nodes(void *cookie, const void *va, const void *vb)
{
	const struct workqueue_struct *wa = va;
	const struct workqueue_struct *wb = vb;

	if ((uintptr_t)wa->wq_lwp < (uintptr_t)wb->wq_lwp)
		return -1;
	if ((uintptr_t)wa->wq_lwp > (uintptr_t)wb->wq_lwp)
		return +1;
	return 0;
}

static int
compare_key(void *cookie, const void *vn, const void *vk)
{
	const struct workqueue_struct *w = vn;
	const struct lwp *lwp = vk;

	if ((uintptr_t)w->wq_lwp < (uintptr_t)lwp)
		return -1;
	if ((uintptr_t)w->wq_lwp > (uintptr_t)lwp)
		return +1;
	return 0;
}

static const rb_tree_ops_t workqueues_rb_ops = {
	.rbto_compare_nodes = compare_nodes,
	.rbto_compare_key = compare_key,
	.rbto_node_offset = offsetof(struct workqueue_struct, wq_node),
};

struct wq_whoami_work {
	kmutex_t www_lock;
	kcondvar_t www_cv;
	struct workqueue_struct *www_wq;
	struct work_struct www_work;
};

static void
workqueue_whoami_work(struct work_struct *work)
{
	struct wq_whoami_work *www = container_of(work,
	    struct wq_whoami_work, www_work);
	struct workqueue_struct *wq = www->www_wq;

	KASSERT(wq->wq_lwp == NULL);
	wq->wq_lwp = curlwp;

	mutex_enter(&www->www_lock);
	cv_broadcast(&www->www_cv);
	mutex_exit(&www->www_lock);
}

static void
workqueue_whoami(struct workqueue_struct *wq)
{
	struct wq_whoami_work www;
	struct workqueue_struct *collision __diagused;

	mutex_init(&www.www_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&www.www_cv, "wqwhoami");
	www.www_wq = wq;

	INIT_WORK(&www.www_work, &workqueue_whoami_work);
	queue_work(wq, &www.www_work);

	mutex_enter(&www.www_lock);
	while (wq->wq_lwp == NULL)
		cv_wait(&www.www_cv, &www.www_lock);
	mutex_exit(&www.www_lock);

	cv_destroy(&www.www_cv);
	mutex_destroy(&www.www_lock);

	mutex_enter(&workqueues.lock);
	collision = rb_tree_insert_node(&workqueues.tree, wq);
	mutex_exit(&workqueues.lock);

	KASSERT(collision == wq);
}

struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq;
	struct work_struct *work;

	mutex_enter(&workqueues.lock);
	wq = rb_tree_find_node(&workqueues.tree, curlwp);
	work = (wq == NULL ? NULL : wq->wq_current_work);
	mutex_exit(&workqueues.lock);

	return work;
}
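
/*
 * Illustrative sketch (not from the original source; the handler name
 * is hypothetical): a work function can use current_work() to assert
 * that it is running in its workqueue's worker thread.
 *
 *	static void
 *	example_handler(struct work_struct *work)
 *	{
 *
 *		KASSERT(current_work() == work);
 *		...
 *	}
 */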

/*
 * Workqueues
 */

struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int linux_flags)
{
	struct workqueue_struct *wq;
	int flags = WQ_MPSAFE;
	int error;

	KASSERT(linux_flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);
	error = workqueue_create(&wq->wq_workqueue, name, &linux_worker,
	    wq, PRI_NONE, IPL_VM, flags);
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return NULL;
	}

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	wq->wq_current_work = NULL;

	workqueue_whoami(wq);
	KASSERT(wq->wq_lwp != NULL);

	return wq;
}

void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.
	 */
	for (;;) {
		struct delayed_work *dw;

		mutex_enter(&wq->wq_lock);
		if (TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = NULL;
		} else {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;

		cancel_delayed_work_sync(dw);
	}

	/*
	 * workqueue_destroy empties the queue; we need not wait for
	 * completion explicitly.  However, we can't destroy the
	 * condvar or mutex until this is done.
	 */
	workqueue_destroy(wq->wq_workqueue);
	KASSERT(wq->wq_current_work == NULL);
	wq->wq_workqueue = NULL;

	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}
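
/*
 * Illustrative lifecycle sketch (hypothetical names, not part of the
 * original file): callers create an ordered workqueue, queue work on
 * it, and destroy it when done; destroy_workqueue cancels outstanding
 * delayed work and drains the queue before freeing.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("examplewq", 0);
 *	if (wq == NULL)
 *		return ENOMEM;
 *	queue_work(wq, &sc->sc_work);
 *	...
 *	destroy_workqueue(wq);
 */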

/*
 * Flush
 *
 * Note: This doesn't cancel or wait for delayed work.  This seems to
 * match what Linux does (or, doesn't do).
 */

void
flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
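
/*
 * Illustrative note (added; hypothetical names): once flush_workqueue
 * returns, work that was already enqueued has completed, but a delayed
 * work item whose callout has not yet fired is untouched -- pair this
 * with cancel_delayed_work_sync if that matters.
 *
 *	queue_work(wq, &sc->sc_work);
 *	flush_workqueue(wq);	(sc_work's handler has now returned)
 */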

struct wq_flush_work {
	struct work_struct wqfw_work;
	struct wq_flush *wqfw_flush;
};

struct wq_flush {
	kmutex_t wqf_lock;
	kcondvar_t wqf_cv;
	unsigned int wqf_n;
};

void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *const wq = work->w_wq;

	if (wq != NULL)
		flush_workqueue(wq);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	static const struct wq_flush zero_wqf;
	struct wq_flush wqf = zero_wqf;

	mutex_init(&wqf.wqf_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wqf.wqf_cv, "lnxwflsh");

	if (1) {
		struct wq_flush_work *const wqfw = kmem_zalloc(sizeof(*wqfw),
		    KM_SLEEP);

		wqf.wqf_n = 1;
		wqfw->wqfw_flush = &wqf;
		INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
		wqfw->wqfw_work.w_wq = wq;
		wqfw->wqfw_work.w_state = WORK_PENDING;
		workqueue_enqueue(wq->wq_workqueue, &wqfw->wqfw_work.w_wk,
		    NULL);
	} else {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		struct wq_flush_work *wqfw;

		panic("per-CPU Linux workqueues don't work yet!");

		wqf.wqf_n = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			wqfw = kmem_zalloc(sizeof(*wqfw), KM_SLEEP);
			mutex_enter(&wqf.wqf_lock);
			wqf.wqf_n++;
			mutex_exit(&wqf.wqf_lock);
			wqfw->wqfw_flush = &wqf;
			INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
			wqfw->wqfw_work.w_state = WORK_PENDING;
			wqfw->wqfw_work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue,
			    &wqfw->wqfw_work.w_wk, ci);
		}
	}

	mutex_enter(&wqf.wqf_lock);
	while (0 < wqf.wqf_n)
		cv_wait(&wqf.wqf_cv, &wqf.wqf_lock);
	mutex_exit(&wqf.wqf_lock);

	cv_destroy(&wqf.wqf_cv);
	mutex_destroy(&wqf.wqf_lock);
}

static void
linux_wq_barrier(struct work_struct *work)
{
	struct wq_flush_work *const wqfw = container_of(work,
	    struct wq_flush_work, wqfw_work);
	struct wq_flush *const wqf = wqfw->wqfw_flush;

	mutex_enter(&wqf->wqf_lock);
	if (--wqf->wqf_n == 0)
		cv_broadcast(&wqf->wqf_cv);
	mutex_exit(&wqf->wqf_lock);

	kmem_free(wqfw, sizeof(*wqfw));
}

/*
 * Work locking
 *
 * We use __cpu_simple_lock(9) rather than mutex(9) because Linux code
 * does not destroy work, so there is nowhere to call mutex_destroy.
 *
 * XXX This is getting out of hand...  Really, work items shouldn't
 * have locks in them at all; instead the workqueues should.
 */

static void
linux_work_lock_init(struct work_struct *work)
{

	__cpu_simple_lock_init(&work->w_lock);
}

static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE. */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}

static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE. */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}

static bool __diagused
linux_work_locked(struct work_struct *work)
{
	return __SIMPLELOCK_LOCKED_P(&work->w_lock);
}

/*
 * Work
 */
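
/*
 * Summary of the w_state transitions implemented below, collected here
 * for reference:
 *
 *	WORK_IDLE/WORK_INVOKED -> WORK_PENDING	queue_work, or
 *						queue_delayed_work with
 *						ticks == 0
 *	WORK_IDLE/WORK_INVOKED -> WORK_DELAYED	queue_delayed_work with
 *						ticks > 0
 *	WORK_DELAYED -> WORK_PENDING		linux_worker_intr, when
 *						the callout fires
 *	WORK_PENDING -> WORK_INVOKED		linux_worker; the state is
 *						left as WORK_INVOKED after
 *						the work function returns
 *	WORK_PENDING -> WORK_CANCELLED		cancel_work_sync,
 *						cancel_delayed_work[_sync]
 *	WORK_DELAYED -> WORK_DELAYED_CANCELLED	cancel_delayed_work[_sync]
 *	WORK_CANCELLED -> WORK_IDLE		linux_worker
 *	WORK_DELAYED_CANCELLED -> WORK_IDLE	linux_worker_intr or
 *						linux_cancel_delayed_work_callout
 */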

void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	linux_work_lock_init(work);
	work->w_state = WORK_IDLE;
	work->w_wq = NULL;
	work->func = fn;
}

bool
schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	/* True if we put it on the queue, false if it was already there. */
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		work->w_state = WORK_PENDING;
		work->w_wq = wq;
		workqueue_enqueue(wq->wq_workqueue, &work->w_wk, NULL);
		newly_queued = true;
		break;

	case WORK_DELAYED:
		panic("queue_work(delayed work %p)", work);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
		newly_queued = false;
		break;

	case WORK_DELAYED_CANCELLED:
		panic("queue_work(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return newly_queued;
}
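
/*
 * Illustrative usage sketch (hypothetical softc and handler names, not
 * part of the original file):
 *
 *	static void
 *	example_work_handler(struct work_struct *work)
 *	{
 *		struct example_softc *sc = container_of(work,
 *		    struct example_softc, sc_work);
 *
 *		(do the deferred work using sc)
 *	}
 *
 *	INIT_WORK(&sc->sc_work, &example_work_handler);
 *	queue_work(wq, &sc->sc_work);
 *
 * schedule_work(&sc->sc_work) is shorthand for queueing on system_wq.
 */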

bool
cancel_work_sync(struct work_struct *work)
{
	bool cancelled_p = false;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	case WORK_PENDING:
		work->w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(work);
		break;

	case WORK_CANCELLED:	/* Already done. */
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return cancelled_p;
}

static void
linux_wait_for_cancelled_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_CANCELLED);

	wq = work->w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(work);
	} while ((work->w_state == WORK_CANCELLED) && (work->w_wq == wq));
}

static void
linux_wait_for_invoked_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_INVOKED);

	wq = work->w_wq;
	mutex_enter(&wq->wq_lock);
	linux_work_unlock(work);
	while (wq->wq_current_work == work)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	mutex_exit(&wq->wq_lock);

	linux_work_lock(work);	/* XXX needless relock */
}

static void
linux_worker(struct work *wk, void *arg)
{
	struct work_struct *const work = container_of(wk, struct work_struct,
	    w_wk);
	struct workqueue_struct *const wq = arg;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
		panic("idle work %p got queued: %p", work, wq);
		break;

	case WORK_DELAYED:
		panic("delayed work %p got queued: %p", work, wq);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);

		/* Get ready to invoke this one. */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_INVOKED;
		KASSERT(wq->wq_current_work == NULL);
		wq->wq_current_work = work;
		mutex_exit(&wq->wq_lock);

		/* Unlock it and do it.  Can't use work after this. */
		linux_work_unlock(work);
		(*work->func)(work);

		/* All done.  Notify anyone waiting for completion. */
		mutex_enter(&wq->wq_lock);
		KASSERT(wq->wq_current_work == work);
		wq->wq_current_work = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		return;

	case WORK_INVOKED:
		panic("invoked work %p got requeued: %p", work, wq);
		break;

	case WORK_CANCELLED:
		KASSERT(work->w_wq == wq);

		/* Return to idle; notify anyone waiting for cancellation. */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_IDLE;
		work->w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancelled delayed work %p got queued: %p", work, wq);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);
}

/*
 * Delayed work
 */

void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{
	INIT_WORK(&dw->work, fn);
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{
	return queue_delayed_work(system_wq, dw, ticks);
}

bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		newly_queued = true;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Leave it to time out
		 * whenever it was going to time out, as Linux does --
		 * neither speed it up nor postpone it.
		 */
		newly_queued = false;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		newly_queued = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return newly_queued;
}
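
/*
 * Illustrative sketch (hypothetical names): run a handler roughly one
 * second from now, and cancel it synchronously on detach.
 *
 *	INIT_DELAYED_WORK(&sc->sc_tick, &example_tick_handler);
 *	schedule_delayed_work(&sc->sc_tick, hz);
 *	...
 *	(void)cancel_delayed_work_sync(&sc->sc_tick);
 */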

bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		timer_modified = false;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Reschedule it.
		 */
		callout_schedule(&dw->dw_callout, ticks);
		timer_modified = true;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		timer_modified = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		timer_modified = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return timer_modified;
}

bool
cancel_delayed_work(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, false);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		cancelled_p = true;
		break;

	case WORK_INVOKED:	/* Don't wait! */
		break;

	case WORK_CANCELLED:	/* Already done. */
	case WORK_DELAYED_CANCELLED:
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, true);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(&dw->work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(&dw->work);
		break;

	case WORK_CANCELLED:	/* Already done. */
		break;

	case WORK_DELAYED_CANCELLED:
		linux_wait_for_delayed_cancelled_work(dw);
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

void
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq = dw->work.w_wq;

	if (wq != NULL)
		flush_workqueue(wq);
}

static void
linux_cancel_delayed_work_callout(struct delayed_work *dw, bool wait)
{
	bool fired_p;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	if (wait) {
		/*
		 * We unlock, halt, and then relock, rather than
		 * passing an interlock to callout_halt, for two
		 * reasons:
		 *
		 * (1) The work lock is not a mutex(9), so we can't use it.
		 * (2) The WORK_DELAYED_CANCELLED state serves as an interlock.
		 */
		linux_work_unlock(&dw->work);
		fired_p = callout_halt(&dw->dw_callout, NULL);
		linux_work_lock(&dw->work);
	} else {
		fired_p = callout_stop(&dw->dw_callout);
	}

	/*
	 * fired_p means we didn't cancel the callout, so it must have
	 * already begun and will clean up after itself.
	 *
	 * !fired_p means we cancelled it so we have to clean up after
	 * it.  Nobody else should have changed the state in that case.
	 */
	if (!fired_p) {
		struct workqueue_struct *wq;

		KASSERT(linux_work_locked(&dw->work));
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

		wq = dw->work.w_wq;
		mutex_enter(&wq->wq_lock);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		callout_destroy(&dw->dw_callout);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
	}
}

static void
linux_wait_for_delayed_cancelled_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	wq = dw->work.w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(&dw->work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(&dw->work);
	} while ((dw->work.w_state == WORK_DELAYED_CANCELLED) &&
	    (dw->work.w_wq == wq));
}

static void
linux_worker_intr(void *arg)
{
	struct delayed_work *dw = arg;
	struct workqueue_struct *wq;

	linux_work_lock(&dw->work);

	KASSERT((dw->work.w_state == WORK_DELAYED) ||
	    (dw->work.w_state == WORK_DELAYED_CANCELLED));

	wq = dw->work.w_wq;
	mutex_enter(&wq->wq_lock);

	/* Queue the work, or return it to idle and alert any cancellers. */
	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
		dw->work.w_state = WORK_PENDING;
		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
		    NULL);
	} else {
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
	}

	/* Either way, the callout is done. */
	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);

	mutex_exit(&wq->wq_lock);
	linux_work_unlock(&dw->work);
}