/*	$NetBSD: linux_work.c,v 1.14 2018/08/27 14:58:09 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.14 2018/08/27 14:58:09 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

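/*
 * struct workqueue_struct
 *
 *	State for one workqueue: a lock and condition variable, the
 *	queue of pending work, the list of pending delayed work, the
 *	work item currently running (if any), the worker LWP, a
 *	generation counter bumped after each batch of work (used by
 *	flush_workqueue), and flags recording whether the current work
 *	item was re-queued while running and whether the workqueue is
 *	being destroyed.
 */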
struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	TAILQ_HEAD(, delayed_work) wq_delayed;
	TAILQ_HEAD(, work_struct) wq_queue;
	struct work_struct	*wq_current_work;
	int			wq_flags;
	struct lwp		*wq_lwp;
	uint64_t		wq_gen;
	bool			wq_requeued:1;
	bool			wq_dying:1;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static void		queue_delayed_work_anew(struct workqueue_struct *,
			    struct delayed_work *, unsigned long);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct	*system_wq __read_mostly;
struct workqueue_struct	*system_long_wq __read_mostly;
struct workqueue_struct	*system_power_efficient_wq __read_mostly;

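/*
 * linux_workqueue_init()
 *
 *	Initialize the Linux workqueue compatibility layer: create the
 *	lwp-specific key used by current_work() and the global system
 *	workqueues.  Returns 0 on success, or an errno value on error.
 */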
int
linux_workqueue_init(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

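/*
 * linux_workqueue_fini()
 *
 *	Tear down the Linux workqueue compatibility layer: destroy the
 *	global system workqueues and the lwp-specific key.
 */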
void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

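/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue with a single worker thread, so that work
 *	items run one at a time in the order queued.  No flags are
 *	supported.  Returns NULL on failure.
 */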
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_gen = 0;
	wq->wq_requeued = false;
	wq->wq_dying = false;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

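/*
 * destroy_workqueue(wq)
 *
 *	Cancel all delayed work pending on wq, tell the worker thread
 *	to exit, wait for it, and free the workqueue.  Asserts that no
 *	work remains queued at that point.
 */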
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	for (;;) {
		struct delayed_work *dw = NULL;

		mutex_enter(&wq->wq_lock);
		if (!TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;
		cancel_delayed_work_sync(dw);
	}

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

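/*
 * linux_workqueue_thread(cookie)
 *
 *	Worker thread for a workqueue.  Repeatedly takes a batch of
 *	work items off the queue and runs them with the lock dropped,
 *	then bumps the generation counter so flush_workqueue can tell
 *	that a batch has completed.  Exits once the workqueue is marked
 *	dying.
 */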
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	TAILQ_HEAD(, work_struct) tmp;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/* Wait until there's activity.  If we're dying, stop.  */
		while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		if (wq->wq_dying)
			break;

		/* Grab a batch of work off the queue.  */
		KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
		TAILQ_INIT(&tmp);
		TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

		/* Process each work item in the batch.  */
		while (!TAILQ_EMPTY(&tmp)) {
			struct work_struct *const work = TAILQ_FIRST(&tmp);

			TAILQ_REMOVE(&tmp, work, work_entry);
			KASSERT(wq->wq_current_work == NULL);
			wq->wq_current_work = work;

			mutex_exit(&wq->wq_lock);
			(*work->func)(work);
			mutex_enter(&wq->wq_lock);

			KASSERT(wq->wq_current_work == work);
			KASSERT(work->work_queue == wq);
			if (wq->wq_requeued)
				wq->wq_requeued = false;
			else
				work->work_queue = NULL;
			wq->wq_current_work = NULL;
			cv_broadcast(&wq->wq_cv);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

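/*
 * linux_workqueue_timeout(cookie)
 *
 *	Callout handler for delayed work.  Depending on what happened
 *	while the callout was pending: move the work onto the queue and
 *	wake the worker (SCHEDULED), note that mod_delayed_work has
 *	already rearmed the callout (RESCHEDULED), or tear the callout
 *	down without queueing anything (CANCELLED).
 */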
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = dw->work.work_queue;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		break;
	case DELAYED_WORK_RESCHEDULED:
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		break;
	case DELAYED_WORK_CANCELLED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		break;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	mutex_exit(&wq->wq_lock);
}

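/*
 * current_work()
 *
 *	If the calling LWP is a workqueue worker thread, return the
 *	work item it is currently running; otherwise return NULL.
 */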
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

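/*
 * INIT_WORK(work, fn)
 *
 *	Initialize a work item to call fn when run.  The work item
 *	starts out unattached to any workqueue.
 */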
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_queue = NULL;
	work->func = fn;
}

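/*
 * schedule_work(work)
 *
 *	Queue work on the system workqueue.  Equivalent to
 *	queue_work(system_wq, work).
 */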
bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

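/*
 * queue_work(wq, work)
 *
 *	Put work on the queue wq, unless it is already on a queue.
 *	Returns true if it was newly queued, false if it was already
 *	queued (in which case it must have been on wq).
 */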
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq))
		== NULL)) {
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

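/*
 * cancel_work(work)
 *
 *	Remove work from its queue if it has not started running yet.
 *	Returns true if the work was removed before it ran, false if it
 *	was not queued or is already running.
 */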
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

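/*
 * cancel_work_sync(work)
 *
 *	Like cancel_work, but if the work is already running, wait for
 *	it to complete before returning.  Returns true iff the work was
 *	removed from the queue before it ran.
 */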
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (wq->wq_current_work == work);
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * Delayed work
 */

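/*
 * INIT_DELAYED_WORK(dw, fn)
 *
 *	Initialize a delayed work item to call fn when run.  The
 *	underlying callout is initialized lazily, the first time the
 *	work is actually scheduled with a nonzero delay.
 */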
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;

	/*
	 * Defer callout_init until we are going to schedule the
	 * callout, which can then callout_destroy it, because
	 * otherwise since there's no DESTROY_DELAYED_WORK or anything
	 * we have no opportunity to call callout_destroy.
	 */
}

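/*
 * schedule_delayed_work(dw, ticks)
 *
 *	Schedule dw to run on the system workqueue after ticks have
 *	elapsed.  Equivalent to queue_delayed_work(system_wq, dw,
 *	ticks).
 */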
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}

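/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *	Internal helper, called with the workqueue lock held and dw
 *	already assigned to wq.  If ticks is zero, put the work
 *	directly on the queue; otherwise make sure the callout is
 *	initialized and armed.  Delays are clamped to INT_MAX ticks.
 */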
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
	    (dw->dw_state == DELAYED_WORK_SCHEDULED));

	if (ticks == 0) {
		if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		}
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		dw->dw_state = DELAYED_WORK_IDLE;
	} else {
		if (dw->dw_state == DELAYED_WORK_IDLE) {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
			    &linux_workqueue_timeout, dw);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
		}
		dw->dw_state = DELAYED_WORK_SCHEDULED;
	}
}

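/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks have elapsed, unless it is
 *	already queued or scheduled, in which case nothing changes.
 *	Returns true iff it was newly scheduled.
 */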
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = atomic_cas_ptr(&dw->work.work_queue, NULL,
		    wq)) == NULL)) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

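/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks, replacing any delay it
 *	had previously been scheduled with.  Returns true if a pending
 *	submission or timer was modified, false if dw was newly
 *	scheduled or is already running.
 */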
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if ((wq0 = atomic_cas_ptr(&dw->work.work_queue, NULL, wq)) == NULL) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		timer_modified = false;
	} else {
		KASSERT(wq0 == wq);
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				/* Work is queued, but hasn't started yet.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = true;
			} else {
				/*
				 * Too late.  Queue it anew.  If that
				 * would skip the callout because it's
				 * immediate, notify the workqueue.
				 */
				wq->wq_requeued = (ticks == 0);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Reschedule it and tell it
				 * we've done so.
				 */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
			} else {
				/* Stopped it.  Queue it anew.  */
				queue_delayed_work_anew(wq, dw, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone modified the timer _again_, or
			 * cancelled it, after the callout started but
			 * before the poor thing even had a chance to
			 * acquire the lock.  Just reschedule it once
			 * more.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			dw->dw_state = DELAYED_WORK_RESCHEDULED;
			timer_modified = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

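/*
 * cancel_delayed_work(dw)
 *
 *	Cancel dw if it is still pending: remove it from the queue or
 *	stop its callout.  Does not wait for a running work function or
 *	an in-flight callout to finish.  Returns true if dw was pending
 *	and is now cancelled, false if it was not pending or is already
 *	running.
 */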
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  */
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Tell it to give up.
				 */
				dw->dw_state = DELAYED_WORK_CANCELLED;
			} else {
				/* Stopped it.  Kill it.  */
				callout_destroy(&dw->dw_callout);
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
				dw->dw_state = DELAYED_WORK_IDLE;
			}
			cancelled_p = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

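/*
 * cancel_delayed_work_sync(dw)
 *
 *	Like cancel_delayed_work, but wait for a running work function
 *	or an in-flight callout to complete before returning.  Returns
 *	true iff dw was cancelled before it ran.
 */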
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  Wait.  */
				do {
					cv_wait(&wq->wq_cv, &wq->wq_lock);
				} while (wq->wq_current_work == &dw->work);
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it has started, tell it to stop, and wait
			 * for it to complete.  We drop the lock, so by
			 * the time the callout has completed, we must
			 * review the state again.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * Flush
 */

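/*
 * flush_scheduled_work()
 *
 *	Wait for the system workqueue to finish its current batch of
 *	work.  Equivalent to flush_workqueue(system_wq).
 */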
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

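/*
 * flush_workqueue(wq)
 *
 *	Block until wq's worker thread has completed at least one more
 *	batch of work, i.e. until the generation counter changes.
 */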
void
flush_workqueue(struct workqueue_struct *wq)
{
	uint64_t gen;

	mutex_enter(&wq->wq_lock);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (gen == wq->wq_gen);
	mutex_exit(&wq->wq_lock);
}

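/*
 * flush_work(work)
 *
 *	If work is on a workqueue, flush that workqueue and return
 *	true; otherwise return false.
 */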
bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work->work_queue) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}

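/*
 * flush_delayed_work(dw)
 *
 *	If dw is on a workqueue, cancel any pending callout for it
 *	(waiting for the callout if it has already fired), then either
 *	remove dw from the queue if it has not yet started or flush the
 *	workqueue if it has.  Returns false if dw was not on a
 *	workqueue, true otherwise.
 */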
bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool do_flush = false;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		do_flush = true;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
			} else {
				do_flush = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	if (do_flush)
		flush_workqueue(wq);

	return true;
}