/*	$NetBSD: linux_work.c,v 1.1.6.2 2016/09/06 20:33:10 skrll Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.1.6.2 2016/09/06 20:33:10 skrll Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/cpu.h>

#include <machine/lock.h>

#include <linux/workqueue.h>

/* XXX Kludge until we sync with HEAD. */
#if DIAGNOSTIC
#define __diagused
#else
#define __diagused __unused
#endif

struct workqueue_struct {
	struct workqueue *wq_workqueue;

	/* XXX The following should all be per-CPU. */
	kmutex_t wq_lock;

	/*
	 * Condvar for when any state related to this workqueue
	 * changes. XXX Could split this into multiple condvars for
	 * different purposes, but whatever...
	 */
	kcondvar_t wq_cv;

	TAILQ_HEAD(, delayed_work) wq_delayed;
	struct work_struct *wq_current_work;
};

static void linux_work_lock_init(struct work_struct *);
static void linux_work_lock(struct work_struct *);
static void linux_work_unlock(struct work_struct *);
static bool linux_work_locked(struct work_struct *) __diagused;

static void linux_wq_barrier(struct work_struct *);

static void linux_wait_for_cancelled_work(struct work_struct *);
static void linux_wait_for_invoked_work(struct work_struct *);
static void linux_worker(struct work *, void *);

static void linux_cancel_delayed_work_callout(struct delayed_work *, bool);
static void linux_wait_for_delayed_cancelled_work(struct delayed_work *);
static void linux_worker_intr(void *);

struct workqueue_struct *system_wq;

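/*
 * linux_workqueue_init: create the global system_wq that backs
 * schedule_work() and schedule_delayed_work(). Returns 0 on success,
 * or ENOMEM if the workqueue could not be created.
 */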
int
linux_workqueue_init(void)
{

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL)
		return ENOMEM;

	return 0;
}

void
linux_workqueue_fini(void)
{
	destroy_workqueue(system_wq);
	system_wq = NULL;
}

/*
 * Workqueues
 */

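/*
 * alloc_ordered_workqueue: allocate and initialize a workqueue_struct
 * backed by a single NetBSD workqueue(9) instance running linux_worker.
 * Only linux_flags == 0 is supported. Returns NULL on failure.
 */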
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int linux_flags)
{
	struct workqueue_struct *wq;
	int flags = WQ_MPSAFE;
	int error;

	KASSERT(linux_flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);
	error = workqueue_create(&wq->wq_workqueue, name, &linux_worker,
	    wq, PRI_NONE, IPL_VM, flags);
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return NULL;
	}

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	wq->wq_current_work = NULL;

	return wq;
}

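/*
 * destroy_workqueue: cancel all outstanding delayed work, let
 * workqueue_destroy drain whatever is still queued, and free the
 * workqueue and its lock and condvar.
 */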
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.
	 */
	for (;;) {
		struct delayed_work *dw;

		mutex_enter(&wq->wq_lock);
		if (TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = NULL;
		} else {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;

		cancel_delayed_work_sync(dw);
	}

	/*
	 * workqueue_destroy empties the queue; we need not wait for
	 * completion explicitly. However, we can't destroy the
	 * condvar or mutex until this is done.
	 */
	workqueue_destroy(wq->wq_workqueue);
	KASSERT(wq->wq_current_work == NULL);
	wq->wq_workqueue = NULL;

	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Flush
 *
 * Note: This doesn't cancel or wait for delayed work. This seems to
 * match what Linux does (or, doesn't do).
 */

void
flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

struct wq_flush_work {
	struct work_struct wqfw_work;
	struct wq_flush *wqfw_flush;
};

struct wq_flush {
	kmutex_t wqf_lock;
	kcondvar_t wqf_cv;
	unsigned int wqf_n;
};

void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *const wq = work->w_wq;

	if (wq != NULL)
		flush_workqueue(wq);
}

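/*
 * flush_workqueue: wait for the work queued on wq so far to complete,
 * by enqueueing a barrier work item (linux_wq_barrier) and sleeping on
 * wqf_cv until it has run. As noted in the section comment above,
 * delayed work that has not yet timed out is neither cancelled nor
 * waited for.
 */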
void
flush_workqueue(struct workqueue_struct *wq)
{
	static const struct wq_flush zero_wqf;
	struct wq_flush wqf = zero_wqf;

	mutex_init(&wqf.wqf_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wqf.wqf_cv, "lnxwflsh");

	if (1) {
		struct wq_flush_work *const wqfw = kmem_zalloc(sizeof(*wqfw),
		    KM_SLEEP);

		wqf.wqf_n = 1;
		wqfw->wqfw_flush = &wqf;
		INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
		wqfw->wqfw_work.w_wq = wq;
		wqfw->wqfw_work.w_state = WORK_PENDING;
		workqueue_enqueue(wq->wq_workqueue, &wqfw->wqfw_work.w_wk,
		    NULL);
	} else {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		struct wq_flush_work *wqfw;

		panic("per-CPU Linux workqueues don't work yet!");

		wqf.wqf_n = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			wqfw = kmem_zalloc(sizeof(*wqfw), KM_SLEEP);
			mutex_enter(&wqf.wqf_lock);
			wqf.wqf_n++;
			mutex_exit(&wqf.wqf_lock);
			wqfw->wqfw_flush = &wqf;
			INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
			wqfw->wqfw_work.w_state = WORK_PENDING;
			wqfw->wqfw_work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue,
			    &wqfw->wqfw_work.w_wk, ci);
		}
	}

	mutex_enter(&wqf.wqf_lock);
	while (0 < wqf.wqf_n)
		cv_wait(&wqf.wqf_cv, &wqf.wqf_lock);
	mutex_exit(&wqf.wqf_lock);

	cv_destroy(&wqf.wqf_cv);
	mutex_destroy(&wqf.wqf_lock);
}

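/*
 * linux_wq_barrier: work handler for the flush barrier. Decrement the
 * flusher's pending count, wake it when the count reaches zero, and
 * free the barrier work item.
 */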
static void
linux_wq_barrier(struct work_struct *work)
{
	struct wq_flush_work *const wqfw = container_of(work,
	    struct wq_flush_work, wqfw_work);
	struct wq_flush *const wqf = wqfw->wqfw_flush;

	mutex_enter(&wqf->wqf_lock);
	if (--wqf->wqf_n == 0)
		cv_broadcast(&wqf->wqf_cv);
	mutex_exit(&wqf->wqf_lock);

	kmem_free(wqfw, sizeof(*wqfw));
}

/*
 * Work locking
 *
 * We use __cpu_simple_lock(9) rather than mutex(9) because Linux code
 * does not destroy work, so there is nowhere to call mutex_destroy.
 *
 * XXX This is getting out of hand... Really, work items shouldn't
 * have locks in them at all; instead the workqueues should.
 */

static void
linux_work_lock_init(struct work_struct *work)
{

	__cpu_simple_lock_init(&work->w_lock);
}

static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE. */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}

static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE. */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}

static bool __diagused
linux_work_locked(struct work_struct *work)
{
	return __SIMPLELOCK_LOCKED_P(&work->w_lock);
}

/*
 * Work
 */

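/*
 * INIT_WORK: initialize a work item with handler fn. The item starts
 * in state WORK_IDLE and is not associated with any workqueue.
 */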
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	linux_work_lock_init(work);
	work->w_state = WORK_IDLE;
	work->w_wq = NULL;
	work->w_fn = fn;
}

bool
schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

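/*
 * queue_work: put work on wq unless it is already pending or has been
 * cancelled. Returns true if the work was newly queued, false
 * otherwise.
 */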
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	/* True if we put it on the queue, false if it was already there. */
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		work->w_state = WORK_PENDING;
		work->w_wq = wq;
		workqueue_enqueue(wq->wq_workqueue, &work->w_wk, NULL);
		newly_queued = true;
		break;

	case WORK_DELAYED:
		panic("queue_work(delayed work %p)", work);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
		newly_queued = false;
		break;

	case WORK_DELAYED_CANCELLED:
		panic("queue_work(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return newly_queued;
}

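/*
 * cancel_work_sync: cancel work and wait until it is neither pending
 * nor running. Returns true if pending work was cancelled, false if
 * there was nothing to cancel (including work already being invoked,
 * which is merely waited for).
 */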
bool
cancel_work_sync(struct work_struct *work)
{
	bool cancelled_p = false;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	case WORK_PENDING:
		work->w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(work);
		break;

	case WORK_CANCELLED:	/* Already done. */
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return cancelled_p;
}

static void
linux_wait_for_cancelled_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_CANCELLED);

	wq = work->w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(work);
	} while ((work->w_state == WORK_CANCELLED) && (work->w_wq == wq));
}

static void
linux_wait_for_invoked_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_INVOKED);

	wq = work->w_wq;
	mutex_enter(&wq->wq_lock);
	linux_work_unlock(work);
	while (wq->wq_current_work == work)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	mutex_exit(&wq->wq_lock);

	linux_work_lock(work);	/* XXX needless relock */
}

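/*
 * linux_worker: workqueue(9) worker for Linux work items. For pending
 * work, mark it invoked, run the handler, and notify anyone waiting
 * for completion; for cancelled work, return it to idle and notify
 * anyone waiting for cancellation instead of running it.
 */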
static void
linux_worker(struct work *wk, void *arg)
{
	struct work_struct *const work = container_of(wk, struct work_struct,
	    w_wk);
	struct workqueue_struct *const wq = arg;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
		panic("idle work %p got queued: %p", work, wq);
		break;

	case WORK_DELAYED:
		panic("delayed work %p got queued: %p", work, wq);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);

		/* Get ready to invoke this one. */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_INVOKED;
		KASSERT(wq->wq_current_work == NULL);
		wq->wq_current_work = work;
		mutex_exit(&wq->wq_lock);

		/* Unlock it and do it. Can't use work after this. */
		linux_work_unlock(work);
		(*work->w_fn)(work);

		/* All done. Notify anyone waiting for completion. */
		mutex_enter(&wq->wq_lock);
		KASSERT(wq->wq_current_work == work);
		wq->wq_current_work = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		return;

	case WORK_INVOKED:
		panic("invoked work %p got requeued: %p", work, wq);
		break;

	case WORK_CANCELLED:
		KASSERT(work->w_wq == wq);

		/* Return to idle; notify anyone waiting for cancellation. */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_IDLE;
		work->w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancelled delayed work %p got queued: %p", work, wq);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);
}

/*
 * Delayed work
 */

void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{
	INIT_WORK(&dw->work, fn);
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{
	return queue_delayed_work(system_wq, dw, ticks);
}

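/*
 * queue_delayed_work: arrange for dw to be queued on wq after ticks
 * have elapsed, or immediately if ticks is zero. If a timer is already
 * ticking, leave it alone. Returns true if the work was newly
 * scheduled.
 */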
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		newly_queued = true;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking. Leave it to time out
		 * whenever it was going to time out, as Linux does --
		 * neither speed it up nor postpone it.
		 */
		newly_queued = false;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		newly_queued = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return newly_queued;
}

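/*
 * mod_delayed_work: like queue_delayed_work, except that a timer that
 * is already ticking is rescheduled for ticks from now. Returns true
 * only if an existing timer was modified.
 */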
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		timer_modified = false;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking. Reschedule it.
		 */
		callout_schedule(&dw->dw_callout, ticks);
		timer_modified = true;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		timer_modified = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		timer_modified = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return timer_modified;
}

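/*
 * cancel_delayed_work: cancel dw without waiting. Stops the callout if
 * it is still ticking and marks pending work cancelled; work already
 * being invoked is left to finish. Returns true if anything was
 * cancelled.
 */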
bool
cancel_delayed_work(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, false);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		cancelled_p = true;
		break;

	case WORK_INVOKED:	/* Don't wait! */
		break;

	case WORK_CANCELLED:	/* Already done. */
	case WORK_DELAYED_CANCELLED:
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

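/*
 * cancel_delayed_work_sync: cancel dw and wait for the cancellation,
 * or for an invocation already in progress, to complete. Returns true
 * if a delayed or pending invocation was cancelled.
 */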
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do. */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, true);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(&dw->work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(&dw->work);
		break;

	case WORK_CANCELLED:	/* Already done. */
		break;

	case WORK_DELAYED_CANCELLED:
		linux_wait_for_delayed_cancelled_work(dw);
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

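/*
 * linux_cancel_delayed_work_callout: stop dw's callout, waiting for it
 * with callout_halt if wait is true. If the callout had not yet fired,
 * finish the cancellation here: remove dw from the workqueue's delayed
 * list, destroy the callout, return the work to idle, and wake any
 * waiters.
 */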
static void
linux_cancel_delayed_work_callout(struct delayed_work *dw, bool wait)
{
	bool fired_p;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	if (wait) {
		/*
		 * We unlock, halt, and then relock, rather than
		 * passing an interlock to callout_halt, for two
		 * reasons:
		 *
		 * (1) The work lock is not a mutex(9), so we can't use it.
		 * (2) The WORK_DELAYED_CANCELLED state serves as an interlock.
		 */
		linux_work_unlock(&dw->work);
		fired_p = callout_halt(&dw->dw_callout, NULL);
		linux_work_lock(&dw->work);
	} else {
		fired_p = callout_stop(&dw->dw_callout);
	}

	/*
	 * fired_p means we didn't cancel the callout, so it must have
	 * already begun and will clean up after itself.
	 *
	 * !fired_p means we cancelled it so we have to clean up after
	 * it. Nobody else should have changed the state in that case.
	 */
	if (!fired_p) {
		struct workqueue_struct *wq;

		KASSERT(linux_work_locked(&dw->work));
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

		wq = dw->work.w_wq;
		mutex_enter(&wq->wq_lock);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		callout_destroy(&dw->dw_callout);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
	}
}

static void
linux_wait_for_delayed_cancelled_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	wq = dw->work.w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(&dw->work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(&dw->work);
	} while ((dw->work.w_state == WORK_DELAYED_CANCELLED) &&
	    (dw->work.w_wq == wq));
}

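/*
 * linux_worker_intr: callout handler for delayed work. When the timer
 * expires, either enqueue the work on its workqueue or, if it was
 * cancelled in the meantime, return it to idle and wake any waiters;
 * either way, the callout itself is removed and destroyed.
 */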
static void
linux_worker_intr(void *arg)
{
	struct delayed_work *dw = arg;
	struct workqueue_struct *wq;

	linux_work_lock(&dw->work);

	KASSERT((dw->work.w_state == WORK_DELAYED) ||
	    (dw->work.w_state == WORK_DELAYED_CANCELLED));

	wq = dw->work.w_wq;
	mutex_enter(&wq->wq_lock);

	/* Queue the work, or return it to idle and alert any cancellers. */
	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
		dw->work.w_state = WORK_PENDING;
		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
		    NULL);
	} else {
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
	}

	/* Either way, the callout is done. */
	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);

	mutex_exit(&wq->wq_lock);
	linux_work_unlock(&dw->work);
}