/*	$NetBSD: linux_work.c,v 1.22 2018/08/27 15:01:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.22 2018/08/27 15:01:47 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	TAILQ_HEAD(, delayed_work) wq_delayed;
	TAILQ_HEAD(, work_struct) wq_queue;
	struct work_struct	*wq_current_work;
	int			wq_flags;
	struct lwp		*wq_lwp;
	uint64_t		wq_gen;
	bool			wq_requeued:1;
	bool			wq_dying:1;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static struct workqueue_struct *
			acquire_work(struct work_struct *,
			    struct workqueue_struct *);
static void		release_work(struct work_struct *,
			    struct workqueue_struct *);
static void		queue_delayed_work_anew(struct workqueue_struct *,
			    struct delayed_work *, unsigned long);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;

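/*
 * linux_workqueue_init()
 *
 *	Initialize the Linux workqueue compatibility layer: create the
 *	lwp-specific key used by current_work and the shared system
 *	workqueues.  Return 0 on success or an errno value on failure.
 */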
int
linux_workqueue_init(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

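/*
 * linux_workqueue_fini()
 *
 *	Tear down the Linux workqueue compatibility layer: destroy the
 *	system workqueues and delete the lwp-specific key.
 */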
void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

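/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue that runs work items one at a time, in
 *	order, on a dedicated kthread named name.  flags must be zero.
 *	Return NULL on failure.
 */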
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	wq->wq_current_work = NULL;
	/* kmem_alloc does not zero; initialize the remaining fields.  */
	wq->wq_flags = 0;
	wq->wq_gen = 0;
	wq->wq_requeued = false;
	wq->wq_dying = false;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

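/*
 * destroy_workqueue(wq)
 *
 *	Cancel all outstanding delayed work on wq, wait for the worker
 *	thread to drain the queue and exit, and free wq.
 */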
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	for (;;) {
		struct delayed_work *dw = NULL;

		mutex_enter(&wq->wq_lock);
		if (!TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;
		cancel_delayed_work_sync(dw);
	}

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

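/*
 * linux_workqueue_thread(cookie)
 *
 *	Worker kthread for a workqueue.  Repeatedly pull a batch of
 *	work off the queue and run each item, releasing it afterward
 *	unless it was requeued, until told to die.
 */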
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	TAILQ_HEAD(, work_struct) tmp;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/* Wait until there's activity.  If we're dying, stop.  */
		while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		if (wq->wq_dying)
			break;

		/* Grab a batch of work off the queue.  */
		KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
		TAILQ_INIT(&tmp);
		TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

		/* Process each work item in the batch.  */
		while (!TAILQ_EMPTY(&tmp)) {
			struct work_struct *const work = TAILQ_FIRST(&tmp);

			KASSERT(work->work_queue == wq);
			TAILQ_REMOVE(&tmp, work, work_entry);
			KASSERT(wq->wq_current_work == NULL);
			wq->wq_current_work = work;

			mutex_exit(&wq->wq_lock);
			(*work->func)(work);
			mutex_enter(&wq->wq_lock);

			KASSERT(wq->wq_current_work == work);
			KASSERT(work->work_queue == wq);
			if (wq->wq_requeued)
				wq->wq_requeued = false;
			else
				release_work(work, wq);
			wq->wq_current_work = NULL;
			cv_broadcast(&wq->wq_cv);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

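/*
 * linux_workqueue_timeout(cookie)
 *
 *	Callout handler for delayed work.  If the work is still
 *	scheduled, move it onto the workqueue; if it was rescheduled or
 *	cancelled while we waited for the lock, act accordingly.
 */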
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = dw->work.work_queue;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	KASSERT(dw->work.work_queue == wq);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		break;
	case DELAYED_WORK_RESCHEDULED:
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		break;
	case DELAYED_WORK_CANCELLED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		release_work(&dw->work, wq);
		/* Can't touch dw any more.  */
		goto out;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
out:	mutex_exit(&wq->wq_lock);
}

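/*
 * current_work()
 *
 *	If the calling lwp is a workqueue thread, return the work item
 *	it is currently running; otherwise return NULL.
 */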
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

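/*
 * Illustrative use of the work API below (a sketch only; my_softc,
 * my_task, and my_attach are hypothetical caller code, not part of
 * this file):
 *
 *	struct my_softc {
 *		struct work_struct	sc_work;
 *	};
 *
 *	static void
 *	my_task(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, sc_work);
 *		...
 *	}
 *
 *	static void
 *	my_attach(struct my_softc *sc)
 *	{
 *		INIT_WORK(&sc->sc_work, &my_task);
 *		queue_work(system_wq, &sc->sc_work);
 *	}
 *
 * Pending work can later be cancelled with cancel_work(&sc->sc_work),
 * or with cancel_work_sync if the caller must also wait for a running
 * invocation to finish.
 */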
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_queue = NULL;
	work->func = fn;
}

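/*
 * acquire_work(work, wq)
 *
 *	Try to associate work with wq.  Return NULL if work was idle
 *	and is now associated with wq; otherwise return the workqueue
 *	it is already associated with.  The caller must hold wq's lock.
 */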
static struct workqueue_struct *
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
	struct workqueue_struct *wq0;

	KASSERT(mutex_owned(&wq->wq_lock));

	wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq);
	if (wq0 == NULL) {
		membar_enter();
		KASSERT(work->work_queue == wq);
	}

	return wq0;
}

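/*
 * release_work(work, wq)
 *
 *	Dissociate work from wq.  The work must currently be associated
 *	with wq, and the caller must hold wq's lock.
 */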
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work->work_queue == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	membar_exit();
	work->work_queue = NULL;
}

bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

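/*
 * queue_work(wq, work)
 *
 *	If work is not already queued on a workqueue, put it on wq,
 *	wake the worker thread, and return true.  If it is already
 *	queued, return false.
 */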
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = acquire_work(work, wq)) == NULL)) {
		/* Newly acquired.  Queue it and wake the worker thread.  */
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		cv_broadcast(&wq->wq_cv);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

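/*
 * cancel_work(work)
 *
 *	If work is queued but has not yet started to run, remove it
 *	from the queue and return true; otherwise return false.  Does
 *	not wait for a running work item to complete.
 */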
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

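/*
 * cancel_work_sync(work)
 *
 *	Like cancel_work, but if work is already running, also wait for
 *	it to complete before returning.
 */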
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (wq->wq_current_work == work);
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * Delayed work
 */

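/*
 * Illustrative use of the delayed work API below (a sketch only;
 * my_softc and my_tick are hypothetical caller code, not part of this
 * file):
 *
 *	struct my_softc {
 *		struct delayed_work	sc_tick;
 *	};
 *
 *	INIT_DELAYED_WORK(&sc->sc_tick, &my_tick);
 *	schedule_delayed_work(&sc->sc_tick, mstohz(1000));
 *	...
 *	cancel_delayed_work_sync(&sc->sc_tick);
 */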
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;

	/*
	 * Defer callout_init until we are about to schedule the
	 * callout, so that whichever path later completes or cancels
	 * it can callout_destroy it.  Since there is no
	 * DESTROY_DELAYED_WORK or the like, we otherwise have no
	 * opportunity to call callout_destroy.
	 */
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}

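/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *	Schedule dw, which must already be associated with wq, to run
 *	after ticks.  If ticks is zero, put it directly on the queue
 *	and wake the worker thread; otherwise arm (or leave armed) a
 *	callout for it.  The caller must hold wq's lock.
 */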
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
	    (dw->dw_state == DELAYED_WORK_SCHEDULED));

	if (ticks == 0) {
		if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		}
		/* Queue it immediately and wake the worker thread.  */
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		dw->dw_state = DELAYED_WORK_IDLE;
	} else {
		if (dw->dw_state == DELAYED_WORK_IDLE) {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
			    &linux_workqueue_timeout, dw);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
		}
		dw->dw_state = DELAYED_WORK_SCHEDULED;
	}
}

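/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	If dw is not already queued, schedule it to run on wq after
 *	ticks and return true.  If it is already queued, leave it alone
 *	and return false.
 */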
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = acquire_work(&dw->work, wq)) == NULL)) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

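/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks, replacing any schedule it
 *	already had.  Return true if an existing schedule was changed,
 *	false if dw was not previously scheduled or had already started
 *	to run.
 */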
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if ((wq0 = acquire_work(&dw->work, wq)) == NULL) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		timer_modified = false;
	} else {
		KASSERT(wq0 == wq);
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				/* Work is queued, but hasn't started yet.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = true;
			} else {
				/*
				 * Too late.  Queue it anew.  If that
				 * would skip the callout because it's
				 * immediate, notify the workqueue.
				 */
				wq->wq_requeued = (ticks == 0);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Reschedule it and tell it
				 * we've done so.
				 */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
			} else {
				/* Stopped it.  Queue it anew.  */
				queue_delayed_work_anew(wq, dw, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone modified the timer _again_, or
			 * cancelled it, after the callout started but
			 * before the poor thing even had a chance to
			 * acquire the lock.  Just reschedule it once
			 * more.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			dw->dw_state = DELAYED_WORK_RESCHEDULED;
			timer_modified = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

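/*
 * cancel_delayed_work(dw)
 *
 *	Cancel any pending timer or queued invocation of dw and return
 *	true; return false if dw was not scheduled or has already
 *	started to run.  Does not wait for a running dw to complete.
 */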
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  */
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, tough.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 *
			 * XXX This logic is duplicated in the
			 * DELAYED_WORK_CANCELLED case of
			 * linux_workqueue_timeout.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			if (callout_stop(&dw->dw_callout))
				break;
			KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
			dw->dw_state = DELAYED_WORK_IDLE;
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
			release_work(&dw->work, wq);
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

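/*
 * cancel_delayed_work_sync(dw)
 *
 *	Like cancel_delayed_work, but if dw is already running, or its
 *	callout has already fired, also wait for it to finish before
 *	returning.
 */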
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

retry:
	/*
	 * If there's no workqueue, nothing to cancel, unless we've
	 * started over from cancelling the callout.
	 */
	if ((wq = dw->work.work_queue) == NULL)
		return cancelled_p;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  Wait.  */
				do {
					cv_wait(&wq->wq_cv, &wq->wq_lock);
				} while (wq->wq_current_work == &dw->work);
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, we must wait for it to
			 * complete.  In that case, the work has been
			 * dissociated from the queue, so we must start
			 * over from the top.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 *
			 * XXX This logic is duplicated in the
			 * DELAYED_WORK_CANCELLED case of
			 * linux_workqueue_timeout.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			if (callout_halt(&dw->dw_callout, &wq->wq_lock)) {
				/*
				 * We had to wait for the callout to
				 * complete.  callout_halt reacquired
				 * the lock, so drop it before starting
				 * over from the top.
				 */
				mutex_exit(&wq->wq_lock);
				goto retry;
			}
			KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
			dw->dw_state = DELAYED_WORK_IDLE;
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
			release_work(&dw->work, wq);
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * Flush
 */

void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

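/*
 * flush_workqueue(wq)
 *
 *	Wait for the worker thread to finish its current batch of work:
 *	if anything is queued or running, sleep until the generation
 *	number advances.
 */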
void
flush_workqueue(struct workqueue_struct *wq)
{
	uint64_t gen;

	mutex_enter(&wq->wq_lock);
	if (wq->wq_current_work || !TAILQ_EMPTY(&wq->wq_queue)) {
		gen = wq->wq_gen;
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (gen == wq->wq_gen);
	}
	mutex_exit(&wq->wq_lock);
}

bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work->work_queue) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}

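/*
 * flush_delayed_work(dw)
 *
 *	If dw's callout is pending, cancel it; if dw is running, flush
 *	the workqueue to wait for it.  Return false if dw is not
 *	associated with a workqueue, true otherwise.
 */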
bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool do_flush = false;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		do_flush = true;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
			} else {
				do_flush = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	if (do_flush)
		flush_workqueue(wq);

	return true;
}