/*	$NetBSD: linux_work.c,v 1.18 2018/08/27 14:59:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.18 2018/08/27 14:59:58 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	TAILQ_HEAD(, delayed_work) wq_delayed;
	TAILQ_HEAD(, work_struct) wq_queue;
	struct work_struct	*wq_current_work;
	int			wq_flags;
	struct lwp		*wq_lwp;
	uint64_t		wq_gen;
	bool			wq_requeued:1;
	bool			wq_dying:1;
};
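
/*
 * Notes on the fields above, summarizing how the code below uses them:
 *
 *	wq_current_work	the work item whose callback is running, or
 *			NULL; the cancel routines use it to decide
 *			whether the callback must be waited for.
 *	wq_gen		generation counter bumped after each batch of
 *			work; flush_workqueue waits for it to change.
 *	wq_requeued	true if the currently running work item has been
 *			requeued on this workqueue, so the worker must
 *			not release it when the callback returns.
 *	wq_dying	set by destroy_workqueue to tell the worker
 *			thread to exit.
 */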

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static struct workqueue_struct *
			acquire_work(struct work_struct *,
			    struct workqueue_struct *);
static void		release_work(struct work_struct *,
			    struct workqueue_struct *);
static void		queue_delayed_work_anew(struct workqueue_struct *,
			    struct delayed_work *, unsigned long);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct	*system_wq __read_mostly;
struct workqueue_struct	*system_long_wq __read_mostly;
struct workqueue_struct	*system_power_efficient_wq __read_mostly;

int
linux_workqueue_init(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_gen = 0;
	wq->wq_requeued = false;
	wq->wq_dying = false;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	for (;;) {
		struct delayed_work *dw = NULL;

		mutex_enter(&wq->wq_lock);
		if (!TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;
		cancel_delayed_work_sync(dw);
	}

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}
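
/*
 * Example (illustrative only, not part of this file): a typical caller
 * creates a private ordered workqueue once and destroys it on teardown.
 * The names example_softc, example_attach, and example_detach are
 * hypothetical.
 */
#if 0
struct example_softc {
	struct workqueue_struct	*sc_wq;
};

static int
example_attach(struct example_softc *sc)
{

	sc->sc_wq = alloc_ordered_workqueue("examplewq", 0);
	if (sc->sc_wq == NULL)
		return ENOMEM;
	return 0;
}

static void
example_detach(struct example_softc *sc)
{

	/* Cancels delayed work and drains the worker before freeing.  */
	destroy_workqueue(sc->sc_wq);
	sc->sc_wq = NULL;
}
#endif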

/*
 * Work thread and callout
 */

static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	TAILQ_HEAD(, work_struct) tmp;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/* Wait until there's activity.  If we're dying, stop.  */
		while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		if (wq->wq_dying)
			break;

		/* Grab a batch of work off the queue.  */
		KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
		TAILQ_INIT(&tmp);
		TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

		/* Process each work item in the batch.  */
		while (!TAILQ_EMPTY(&tmp)) {
			struct work_struct *const work = TAILQ_FIRST(&tmp);

			KASSERT(work->work_queue == wq);
			TAILQ_REMOVE(&tmp, work, work_entry);
			KASSERT(wq->wq_current_work == NULL);
			wq->wq_current_work = work;

			mutex_exit(&wq->wq_lock);
			(*work->func)(work);
			mutex_enter(&wq->wq_lock);

			KASSERT(wq->wq_current_work == work);
			KASSERT(work->work_queue == wq);
			if (wq->wq_requeued)
				wq->wq_requeued = false;
			else
				release_work(work, wq);
			wq->wq_current_work = NULL;
			cv_broadcast(&wq->wq_cv);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

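/*
 * Summary of the delayed work states used by the code below:
 *
 *	IDLE		no callout pending; the work is unqueued, on
 *			wq_queue, or currently running.
 *	SCHEDULED	a callout is pending and will queue the work
 *			when it fires.
 *	RESCHEDULED	the callout has fired but not yet taken wq_lock,
 *			and meanwhile mod_delayed_work gave it a new
 *			expiry; the callout just flips it back to
 *			SCHEDULED.
 *	CANCELLED	the callout has fired but not yet taken wq_lock,
 *			and meanwhile the work was cancelled; the
 *			callout tears itself down and releases the work.
 */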
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = dw->work.work_queue;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	KASSERT(dw->work.work_queue == wq);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		break;
	case DELAYED_WORK_RESCHEDULED:
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		break;
	case DELAYED_WORK_CANCELLED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		release_work(&dw->work, wq);
		break;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
	mutex_exit(&wq->wq_lock);
}

struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_queue = NULL;
	work->func = fn;
}

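/*
 * acquire_work and release_work implement ownership of a work item by
 * a workqueue: acquire_work publishes the owner with an atomic
 * compare-and-swap of work->work_queue and returns the previous owner
 * (NULL if we got it).  The membar_enter on success pairs with the
 * membar_exit in release_work, so the next owner sees all stores made
 * by the previous owner before it released the work.
 */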
static struct workqueue_struct *
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
	struct workqueue_struct *wq0;

	KASSERT(mutex_owned(&wq->wq_lock));

	wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq);
	if (wq0 == NULL) {
		membar_enter();
		KASSERT(work->work_queue == wq);
	}

	return wq0;
}

static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work->work_queue == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	membar_exit();
	work->work_queue = NULL;
}

bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = acquire_work(work, wq)) == NULL)) {
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		cv_broadcast(&wq->wq_cv);	/* wake the worker thread */
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (wq->wq_current_work == work);
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}
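
/*
 * Example (illustrative only): defining and queueing a work item.  The
 * names frob_softc, frob_task, frob_start, and frob_stop are
 * hypothetical; container_of is the usual way to recover the containing
 * structure from the work pointer passed to the callback.
 */
#if 0
struct frob_softc {
	struct work_struct	sc_work;
};

static void
frob_task(struct work_struct *work)
{
	struct frob_softc *sc = container_of(work, struct frob_softc,
	    sc_work);

	/* Runs in the workqueue thread, with wq_lock not held.  */
	(void)sc;
}

static void
frob_start(struct frob_softc *sc)
{

	INIT_WORK(&sc->sc_work, frob_task);
	(void)schedule_work(&sc->sc_work);	/* or queue_work(wq, ...) */
}

static void
frob_stop(struct frob_softc *sc)
{

	/* Dequeues the work, or waits for the callback if it's running.  */
	(void)cancel_work_sync(&sc->sc_work);
}
#endif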

/*
 * Delayed work
 */

void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;

	/*
	 * Defer callout_init until we are actually going to schedule
	 * the callout, so that whoever schedules it can also
	 * callout_destroy it.  There is no DESTROY_DELAYED_WORK, so we
	 * otherwise have no opportunity to call callout_destroy.
	 */
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}

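/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *	Common path for queue_delayed_work and mod_delayed_work: the
 *	caller holds wq_lock and has already acquired dw for wq.  With a
 *	zero delay the work goes straight onto wq_queue; otherwise the
 *	callout is initialized if necessary and armed to queue the work
 *	when it fires.
 */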
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
	    (dw->dw_state == DELAYED_WORK_SCHEDULED));

	if (ticks == 0) {
		if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		}
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);	/* wake the worker thread */
		dw->dw_state = DELAYED_WORK_IDLE;
	} else {
		if (dw->dw_state == DELAYED_WORK_IDLE) {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
			    &linux_workqueue_timeout, dw);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
		}
		dw->dw_state = DELAYED_WORK_SCHEDULED;
	}
}

bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = acquire_work(&dw->work, wq)) == NULL)) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

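/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks, replacing any pending
 *	timer.  Returns true if there was a timer or a queued (but not
 *	yet running) work item to modify, false if the work was newly
 *	queued or had already started and was simply queued anew.
 */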
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if ((wq0 = acquire_work(&dw->work, wq)) == NULL) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		timer_modified = false;
	} else {
		KASSERT(wq0 == wq);
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				/* Work is queued, but hasn't started yet.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = true;
			} else {
				/*
				 * Too late.  Queue it anew.  If that
				 * would skip the callout because it's
				 * immediate, notify the workqueue.
				 */
				wq->wq_requeued = (ticks == 0);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Reschedule it and tell it
				 * we've done so.
				 */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
			} else {
				/* Stopped it.  Queue it anew.  */
				queue_delayed_work_anew(wq, dw, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone modified the timer _again_, or
			 * cancelled it, after the callout started but
			 * before the poor thing even had a chance to
			 * acquire the lock.  Just reschedule it once
			 * more.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			dw->dw_state = DELAYED_WORK_RESCHEDULED;
			timer_modified = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  */
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Tell it to give up.
				 */
				dw->dw_state = DELAYED_WORK_CANCELLED;
			} else {
				/* Stopped it.  Kill it.  */
				callout_destroy(&dw->dw_callout);
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
				dw->dw_state = DELAYED_WORK_IDLE;
			}
			cancelled_p = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  Wait.  */
				do {
					cv_wait(&wq->wq_cv, &wq->wq_lock);
				} while (wq->wq_current_work == &dw->work);
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Tell the callout to give up, and wait for it
			 * to complete.  callout_halt drops the lock, so
			 * by the time it returns we must review the
			 * state again.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}
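
/*
 * Example (illustrative only): deferring a task with delayed work.  The
 * names poll_softc, poll_task, poll_kick, and POLL_DELAY are
 * hypothetical; hz is the clock tick rate from <sys/kernel.h>.  Note
 * that schedule_delayed_work is a no-op (returning false) if the work
 * is already queued, scheduled, or still owned by the workqueue.
 */
#if 0
#define	POLL_DELAY	(hz / 2)	/* half a second, in callout ticks */

struct poll_softc {
	struct delayed_work	sc_dwork;
};

static void
poll_task(struct work_struct *work)
{
	struct poll_softc *sc = container_of(work, struct poll_softc,
	    sc_dwork.work);

	/* Runs in the workqueue thread once the delay has elapsed.  */
	(void)sc;
}

static void
poll_init(struct poll_softc *sc)
{

	INIT_DELAYED_WORK(&sc->sc_dwork, poll_task);
}

static void
poll_kick(struct poll_softc *sc)
{

	(void)schedule_delayed_work(&sc->sc_dwork, POLL_DELAY);
}

static void
poll_fini(struct poll_softc *sc)
{

	/* Stop the callout and wait out any in-flight callback.  */
	(void)cancel_delayed_work_sync(&sc->sc_dwork);
}
#endif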

/*
 * Flush
 */

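/*
 * Flushing is driven by the wq_gen generation counter: flush_workqueue
 * waits until the worker thread finishes at least one more batch of
 * work.  flush_work and flush_delayed_work fall back on flushing the
 * whole workqueue, which waits for more than the single item but is a
 * safe over-approximation.
 */
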
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	uint64_t gen;

	mutex_enter(&wq->wq_lock);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (gen == wq->wq_gen);
	mutex_exit(&wq->wq_lock);
}

bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work->work_queue) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}

bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool do_flush = false;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		do_flush = true;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
			} else {
				do_flush = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	if (do_flush)
		flush_workqueue(wq);

	return true;
}