/*	$NetBSD: linux_work.c,v 1.25 2018/08/27 15:02:38 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.25 2018/08/27 15:02:38 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

struct workqueue_struct {
        kmutex_t                wq_lock;
        kcondvar_t              wq_cv;
        TAILQ_HEAD(, delayed_work) wq_delayed;
        TAILQ_HEAD(, work_struct) wq_queue;
        struct work_struct      *wq_current_work;
        int                     wq_flags;
        struct lwp              *wq_lwp;
        uint64_t                wq_gen;
        bool                    wq_requeued:1;
        bool                    wq_dying:1;
};

static void __dead      linux_workqueue_thread(void *);
static void             linux_workqueue_timeout(void *);
static struct workqueue_struct *
                        acquire_work(struct work_struct *,
                            struct workqueue_struct *);
static void             release_work(struct work_struct *,
                            struct workqueue_struct *);
static void             queue_delayed_work_anew(struct workqueue_struct *,
                            struct delayed_work *, unsigned long);
static void             cancel_delayed_work_done(struct workqueue_struct *,
                            struct delayed_work *);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;

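/*
 * linux_workqueue_init()
 *
 *      Initialize the Linux workqueue compatibility layer: create the
 *      lwp-specific key used by current_work() and allocate the global
 *      system workqueues.  Returns 0 on success, or an error code on
 *      failure.
 */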
int
linux_workqueue_init(void)
{
        int error;

        error = lwp_specific_key_create(&workqueue_key, NULL);
        if (error)
                goto fail0;

        system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
        if (system_wq == NULL) {
                error = ENOMEM;
                goto fail1;
        }

        system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
        if (system_long_wq == NULL) {
                error = ENOMEM;
                goto fail2;
        }

        system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
        if (system_power_efficient_wq == NULL) {
                error = ENOMEM;
                goto fail3;
        }

        return 0;

fail4: __unused
        destroy_workqueue(system_power_efficient_wq);
fail3:  destroy_workqueue(system_long_wq);
fail2:  destroy_workqueue(system_wq);
fail1:  lwp_specific_key_delete(workqueue_key);
fail0:  KASSERT(error);
        return error;
}

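/*
 * linux_workqueue_fini()
 *
 *      Tear down the Linux workqueue compatibility layer: destroy the
 *      global system workqueues and the lwp-specific key.
 */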
void
linux_workqueue_fini(void)
{

        destroy_workqueue(system_power_efficient_wq);
        destroy_workqueue(system_long_wq);
        destroy_workqueue(system_wq);
        lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

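/*
 * alloc_ordered_workqueue(name, flags)
 *
 *      Create a workqueue that runs work items one at a time, in order,
 *      on a dedicated kthread.  Only flags == 0 is supported.  Returns
 *      NULL on failure.
 */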
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
        struct workqueue_struct *wq;
        int error;

        KASSERT(flags == 0);

        wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);

        mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&wq->wq_cv, name);
        TAILQ_INIT(&wq->wq_delayed);
        TAILQ_INIT(&wq->wq_queue);
        wq->wq_current_work = NULL;
        wq->wq_flags = 0;
        wq->wq_lwp = NULL;
        wq->wq_gen = 0;
        wq->wq_requeued = false;
        wq->wq_dying = false;

        error = kthread_create(PRI_NONE,
            KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
            &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
        if (error)
                goto fail0;

        return wq;

fail0:  KASSERT(TAILQ_EMPTY(&wq->wq_queue));
        KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
        cv_destroy(&wq->wq_cv);
        mutex_destroy(&wq->wq_lock);
        kmem_free(wq, sizeof(*wq));
        return NULL;
}

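/*
 * destroy_workqueue(wq)
 *
 *      Cancel all pending delayed work on wq, tell the worker thread to
 *      exit, wait for it, and free wq.  On return the queues must be
 *      empty, so no new work may be queued concurrently.
 */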
void
destroy_workqueue(struct workqueue_struct *wq)
{

        /*
         * Cancel all delayed work.  We do this first because any
         * delayed work that has already timed out, which we can't
         * cancel, may have queued new work.
         */
        for (;;) {
                struct delayed_work *dw = NULL;

                mutex_enter(&wq->wq_lock);
                if (!TAILQ_EMPTY(&wq->wq_delayed)) {
                        dw = TAILQ_FIRST(&wq->wq_delayed);
                        if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
                                TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
                }
                mutex_exit(&wq->wq_lock);

                if (dw == NULL)
                        break;
                cancel_delayed_work_sync(dw);
        }

        /* Tell the thread to exit. */
        mutex_enter(&wq->wq_lock);
        wq->wq_dying = true;
        cv_broadcast(&wq->wq_cv);
        mutex_exit(&wq->wq_lock);

        /* Wait for it to exit. */
        (void)kthread_join(wq->wq_lwp);

        KASSERT(wq->wq_dying);
        KASSERT(!wq->wq_requeued);
        KASSERT(wq->wq_flags == 0);
        KASSERT(wq->wq_current_work == NULL);
        KASSERT(TAILQ_EMPTY(&wq->wq_queue));
        KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
        cv_destroy(&wq->wq_cv);
        mutex_destroy(&wq->wq_lock);

        kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

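/*
 * linux_workqueue_thread(cookie)
 *
 *      Main function of a workqueue's worker thread.  Waits for work to
 *      be queued, grabs a batch at a time, and runs each work item with
 *      the workqueue lock dropped.  Exits when the workqueue is dying.
 */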
static void __dead
linux_workqueue_thread(void *cookie)
{
        struct workqueue_struct *const wq = cookie;
        TAILQ_HEAD(, work_struct) tmp;

        lwp_setspecific(workqueue_key, wq);

        mutex_enter(&wq->wq_lock);
        for (;;) {
                /* Wait until there's activity.  If we're dying, stop. */
                while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
                        cv_wait(&wq->wq_cv, &wq->wq_lock);
                if (wq->wq_dying)
                        break;

                /* Grab a batch of work off the queue. */
                KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
                TAILQ_INIT(&tmp);
                TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

                /* Process each work item in the batch. */
                while (!TAILQ_EMPTY(&tmp)) {
                        struct work_struct *const work = TAILQ_FIRST(&tmp);

                        KASSERT(work->work_queue == wq);
                        TAILQ_REMOVE(&tmp, work, work_entry);
                        KASSERT(wq->wq_current_work == NULL);
                        wq->wq_current_work = work;

                        mutex_exit(&wq->wq_lock);
                        (*work->func)(work);
                        mutex_enter(&wq->wq_lock);

                        KASSERT(wq->wq_current_work == work);
                        KASSERT(work->work_queue == wq);
                        if (wq->wq_requeued)
                                wq->wq_requeued = false;
                        else
                                release_work(work, wq);
                        wq->wq_current_work = NULL;
                        cv_broadcast(&wq->wq_cv);
                }

                /* Notify flush that we've completed a batch of work. */
                wq->wq_gen++;
                cv_broadcast(&wq->wq_cv);
        }
        mutex_exit(&wq->wq_lock);

        kthread_exit(0);
}

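/*
 * linux_workqueue_timeout(cookie)
 *
 *      Callout handler for delayed work.  If the work is still
 *      scheduled, move it onto the workqueue's run queue; otherwise
 *      note that it was rescheduled, or finish a pending cancellation.
 */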
static void
linux_workqueue_timeout(void *cookie)
{
        struct delayed_work *const dw = cookie;
        struct workqueue_struct *const wq = dw->work.work_queue;

        KASSERT(wq != NULL);

        mutex_enter(&wq->wq_lock);
        KASSERT(dw->work.work_queue == wq);
        switch (dw->dw_state) {
        case DELAYED_WORK_IDLE:
                panic("delayed work callout uninitialized: %p", dw);
        case DELAYED_WORK_SCHEDULED:
                dw->dw_state = DELAYED_WORK_IDLE;
                callout_destroy(&dw->dw_callout);
                TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
                TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
                cv_broadcast(&wq->wq_cv);
                break;
        case DELAYED_WORK_RESCHEDULED:
                dw->dw_state = DELAYED_WORK_SCHEDULED;
                break;
        case DELAYED_WORK_CANCELLED:
                cancel_delayed_work_done(wq, dw);
                /* Can't touch dw any more. */
                goto out;
        default:
                panic("delayed work callout in bad state: %p", dw);
        }
        KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
            dw->dw_state == DELAYED_WORK_SCHEDULED);
out:    mutex_exit(&wq->wq_lock);
}

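/*
 * current_work()
 *
 *      If called from a workqueue's worker thread, return the work item
 *      it is currently running; otherwise return NULL.
 */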
struct work_struct *
current_work(void)
{
        struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

        /* If we're not a workqueue thread, then there's no work. */
        if (wq == NULL)
                return NULL;

        /*
         * Otherwise, this should be possible only while work is in
         * progress.  Return the current work item.
         */
        KASSERT(wq->wq_current_work != NULL);
        return wq->wq_current_work;
}

/*
 * Work
 */

void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

        work->work_queue = NULL;
        work->func = fn;
}

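/*
 * acquire_work(work, wq)
 *
 *      Atomically associate work with wq if it is not already
 *      associated with a workqueue.  Returns NULL if we newly acquired
 *      it, or the workqueue it was already associated with.  Caller
 *      must hold wq's lock.
 */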
static struct workqueue_struct *
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
        struct workqueue_struct *wq0;

        KASSERT(mutex_owned(&wq->wq_lock));

        wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq);
        if (wq0 == NULL) {
                membar_enter();
                KASSERT(work->work_queue == wq);
        }

        return wq0;
}

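/*
 * release_work(work, wq)
 *
 *      Dissociate work from wq.  Pairs with acquire_work; the release
 *      barrier makes prior updates to the work visible before it can be
 *      reacquired.  Caller must hold wq's lock.
 */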
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

        KASSERT(work->work_queue == wq);
        KASSERT(mutex_owned(&wq->wq_lock));

        membar_exit();
        work->work_queue = NULL;
}

bool
schedule_work(struct work_struct *work)
{

        return queue_work(system_wq, work);
}

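/*
 * queue_work(wq, work)
 *
 *      Queue work to run on wq.  Returns true if it was newly queued,
 *      false if it was already queued on wq.
 */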
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        struct workqueue_struct *wq0;
        bool newly_queued;

        KASSERT(wq != NULL);

        mutex_enter(&wq->wq_lock);
        if (__predict_true((wq0 = acquire_work(work, wq)) == NULL)) {
                TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
                newly_queued = true;
        } else {
                KASSERT(wq0 == wq);
                newly_queued = false;
        }
        mutex_exit(&wq->wq_lock);

        return newly_queued;
}

bool
cancel_work(struct work_struct *work)
{
        struct workqueue_struct *wq;
        bool cancelled_p = false;

        /* If there's no workqueue, nothing to cancel. */
        if ((wq = work->work_queue) == NULL)
                goto out;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work->work_queue != wq)) {
                cancelled_p = false;
        } else if (wq->wq_current_work == work) {
                cancelled_p = false;
        } else {
                TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
                cancelled_p = true;
        }
        mutex_exit(&wq->wq_lock);

out:    return cancelled_p;
}

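/*
 * cancel_work_sync(work)
 *
 *      Like cancel_work, but if the work is already running, wait for
 *      it to complete before returning.  Returns true if the work was
 *      removed from the queue before it ran, false otherwise.
 */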
bool
cancel_work_sync(struct work_struct *work)
{
        struct workqueue_struct *wq;
        bool cancelled_p = false;

        /* If there's no workqueue, nothing to cancel. */
        if ((wq = work->work_queue) == NULL)
                goto out;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work->work_queue != wq)) {
                cancelled_p = false;
        } else if (wq->wq_current_work == work) {
                do {
                        cv_wait(&wq->wq_cv, &wq->wq_lock);
                } while (wq->wq_current_work == work);
                cancelled_p = false;
        } else {
                TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
                cancelled_p = true;
        }
        mutex_exit(&wq->wq_lock);

out:    return cancelled_p;
}

/*
 * Delayed work
 */

void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

        INIT_WORK(&dw->work, fn);
        dw->dw_state = DELAYED_WORK_IDLE;

        /*
         * Defer callout_init until we are going to schedule the
         * callout, which can then callout_destroy it: there is no
         * DESTROY_DELAYED_WORK or similar, so otherwise we would have
         * no opportunity to call callout_destroy.
         */
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

        return queue_delayed_work(system_wq, dw, ticks);
}

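/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *      Schedule dw to run on wq after ticks, or put it on the run queue
 *      immediately if ticks is zero.  The work must already be
 *      associated with wq and must be idle or scheduled.  Caller must
 *      hold wq's lock.
 */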
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(dw->work.work_queue == wq);
        KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
            (dw->dw_state == DELAYED_WORK_SCHEDULED));

        if (ticks == 0) {
                if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
                        callout_destroy(&dw->dw_callout);
                        TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
                } else {
                        KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
                }
                TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
                dw->dw_state = DELAYED_WORK_IDLE;
        } else {
                if (dw->dw_state == DELAYED_WORK_IDLE) {
                        callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
                        callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
                            &linux_workqueue_timeout, dw);
                        TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
                } else {
                        KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
                        /* Re-arm the existing callout with the new delay. */
                        callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
                }
                dw->dw_state = DELAYED_WORK_SCHEDULED;
        }
}

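/*
 * cancel_delayed_work_done(wq, dw)
 *
 *      Complete the cancellation of dw: destroy its callout, remove it
 *      from wq's list of delayed work, and dissociate it from wq.  The
 *      owner may free dw after this, so it must not be touched again.
 *      Caller must hold wq's lock.
 */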
static void
cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
{

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(dw->work.work_queue == wq);
        KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
        dw->dw_state = DELAYED_WORK_IDLE;
        callout_destroy(&dw->dw_callout);
        TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
        release_work(&dw->work, wq);
        /* Can't touch dw after this point. */
}

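/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *      Queue dw to run on wq after ticks, or immediately if ticks is
 *      zero.  Returns true if it was newly queued, false if it was
 *      already queued on wq.
 */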
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
        struct workqueue_struct *wq0;
        bool newly_queued;

        mutex_enter(&wq->wq_lock);
        if (__predict_true((wq0 = acquire_work(&dw->work, wq)) == NULL)) {
                KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
                queue_delayed_work_anew(wq, dw, ticks);
                newly_queued = true;
        } else {
                KASSERT(wq0 == wq);
                newly_queued = false;
        }
        mutex_exit(&wq->wq_lock);

        return newly_queued;
}

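/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *      Schedule dw to run on wq after ticks, replacing any existing
 *      timer.  Returns true if a pending timer or queued work item was
 *      modified, false if the work was newly queued or is already
 *      running.
 */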
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
        struct workqueue_struct *wq0;
        bool timer_modified;

        mutex_enter(&wq->wq_lock);
        if ((wq0 = acquire_work(&dw->work, wq)) == NULL) {
                KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
                queue_delayed_work_anew(wq, dw, ticks);
                timer_modified = false;
        } else {
                KASSERT(wq0 == wq);
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        if (wq->wq_current_work != &dw->work) {
                                /* Work is queued, but hasn't started yet. */
                                TAILQ_REMOVE(&wq->wq_queue, &dw->work,
                                    work_entry);
                                queue_delayed_work_anew(wq, dw, ticks);
                                timer_modified = true;
                        } else {
                                /*
                                 * Too late.  Queue it anew.  If that
                                 * would skip the callout because it's
                                 * immediate, notify the workqueue.
                                 */
                                wq->wq_requeued = (ticks == 0);
                                queue_delayed_work_anew(wq, dw, ticks);
                                timer_modified = false;
                        }
                        break;
                case DELAYED_WORK_SCHEDULED:
                        if (callout_stop(&dw->dw_callout)) {
                                /*
                                 * Too late to stop, but we got in
                                 * before the callout acquired the
                                 * lock.  Reschedule it and tell it
                                 * we've done so.
                                 */
                                dw->dw_state = DELAYED_WORK_RESCHEDULED;
                                callout_schedule(&dw->dw_callout,
                                    MIN(INT_MAX, ticks));
                        } else {
                                /* Stopped it.  Queue it anew. */
                                queue_delayed_work_anew(wq, dw, ticks);
                        }
                        timer_modified = true;
                        break;
                case DELAYED_WORK_RESCHEDULED:
                case DELAYED_WORK_CANCELLED:
                        /*
                         * Someone modified the timer _again_, or
                         * cancelled it, after the callout started but
                         * before the poor thing even had a chance to
                         * acquire the lock.  Just reschedule it once
                         * more.
                         */
                        callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
                        dw->dw_state = DELAYED_WORK_RESCHEDULED;
                        timer_modified = true;
                        break;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return timer_modified;
}

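/*
 * cancel_delayed_work(dw)
 *
 *      Cancel dw without waiting for it: dequeue it if it has not yet
 *      started, or mark a pending callout cancelled.  Returns true if
 *      the cancellation took effect, false if the work is already
 *      running or was not queued at all.
 */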
bool
cancel_delayed_work(struct delayed_work *dw)
{
        struct workqueue_struct *wq;
        bool cancelled_p;

        /* If there's no workqueue, nothing to cancel. */
        if ((wq = dw->work.work_queue) == NULL)
                return false;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(dw->work.work_queue != wq)) {
                cancelled_p = false;
        } else {
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        if (wq->wq_current_work == &dw->work) {
                                /* Too late, it's already running. */
                                cancelled_p = false;
                        } else {
                                /* Got in before it started.  Remove it. */
                                TAILQ_REMOVE(&wq->wq_queue, &dw->work,
                                    work_entry);
                                cancelled_p = true;
                        }
                        break;
                case DELAYED_WORK_SCHEDULED:
                case DELAYED_WORK_RESCHEDULED:
                case DELAYED_WORK_CANCELLED:
                        /*
                         * If it is scheduled, mark it cancelled and
                         * try to stop the callout before it starts.
                         *
                         * If it's too late and the callout has already
                         * begun to execute, tough.
                         *
                         * If we stopped the callout before it started,
                         * however, then destroy the callout and
                         * dissociate it from the workqueue ourselves.
                         */
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        cancelled_p = true;
                        if (callout_stop(&dw->dw_callout))
                                break;
                        cancel_delayed_work_done(wq, dw);
                        break;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return cancelled_p;
}

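/*
 * cancel_delayed_work_sync(dw)
 *
 *      Like cancel_delayed_work, but wait if the work is already
 *      running, or if its callout has begun to execute, before
 *      returning.  Returns true if the cancellation took effect, false
 *      if the work had already started running or was not queued.
 */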
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
        struct workqueue_struct *wq;
        bool cancelled_p;

        /* If there's no workqueue, nothing to cancel. */
        if ((wq = dw->work.work_queue) == NULL)
                return false;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(dw->work.work_queue != wq)) {
                cancelled_p = false;
        } else {
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        if (wq->wq_current_work == &dw->work) {
                                /* Too late, it's already running.  Wait. */
                                do {
                                        cv_wait(&wq->wq_cv, &wq->wq_lock);
                                } while (wq->wq_current_work == &dw->work);
                                cancelled_p = false;
                        } else {
                                /* Got in before it started.  Remove it. */
                                TAILQ_REMOVE(&wq->wq_queue, &dw->work,
                                    work_entry);
                                cancelled_p = true;
                        }
                        break;
                case DELAYED_WORK_SCHEDULED:
                case DELAYED_WORK_RESCHEDULED:
                case DELAYED_WORK_CANCELLED:
                        /*
                         * If it is scheduled, mark it cancelled and
                         * try to stop the callout before it starts.
                         *
                         * If it's too late and the callout has already
                         * begun to execute, we must wait for it to
                         * complete.  But we got in soon enough to ask
                         * the callout not to run, so we successfully
                         * cancelled it in that case.
                         *
                         * If we stopped the callout before it started,
                         * however, then destroy the callout and
                         * dissociate it from the workqueue ourselves.
                         */
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        cancelled_p = true;
                        if (callout_halt(&dw->dw_callout, &wq->wq_lock))
                                break;
                        cancel_delayed_work_done(wq, dw);
                        break;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return cancelled_p;
}

/*
 * Flush
 */

void
flush_scheduled_work(void)
{

        flush_workqueue(system_wq);
}

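/*
 * flush_workqueue(wq)
 *
 *      If any work is queued or in progress on wq, wait for the worker
 *      thread to complete its current batch of work.
 */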
void
flush_workqueue(struct workqueue_struct *wq)
{
        uint64_t gen;

        mutex_enter(&wq->wq_lock);
        if (wq->wq_current_work || !TAILQ_EMPTY(&wq->wq_queue)) {
                gen = wq->wq_gen;
                do {
                        cv_wait(&wq->wq_cv, &wq->wq_lock);
                } while (gen == wq->wq_gen);
        }
        mutex_exit(&wq->wq_lock);
}

bool
flush_work(struct work_struct *work)
{
        struct workqueue_struct *wq;

        /* If there's no workqueue, nothing to flush. */
        if ((wq = work->work_queue) == NULL)
                return false;

        flush_workqueue(wq);
        return true;
}

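/*
 * flush_delayed_work(dw)
 *
 *      Halt any pending callout for dw; then, if dw is already running,
 *      wait for the workqueue to complete its current batch of work,
 *      and if dw is queued but not yet running, dequeue it.  Returns
 *      true if dw had a workqueue, false if not.
 */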
bool
flush_delayed_work(struct delayed_work *dw)
{
        struct workqueue_struct *wq;
        bool do_flush = false;

        /* If there's no workqueue, nothing to flush. */
        if ((wq = dw->work.work_queue) == NULL)
                return false;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(dw->work.work_queue != wq)) {
                do_flush = true;
        } else {
retry:          switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        if (wq->wq_current_work != &dw->work) {
                                TAILQ_REMOVE(&wq->wq_queue, &dw->work,
                                    work_entry);
                        } else {
                                do_flush = true;
                        }
                        break;
                case DELAYED_WORK_SCHEDULED:
                case DELAYED_WORK_RESCHEDULED:
                case DELAYED_WORK_CANCELLED:
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        callout_halt(&dw->dw_callout, &wq->wq_lock);
                        goto retry;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        if (do_flush)
                flush_workqueue(wq);

        return true;
}