/*	$NetBSD: linux_work.c,v 1.28 2018/08/27 15:03:20 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.28 2018/08/27 15:03:20 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

struct workqueue_struct {
	kmutex_t		wq_lock;
	kcondvar_t		wq_cv;
	TAILQ_HEAD(, delayed_work) wq_delayed;
	TAILQ_HEAD(, work_struct) wq_queue;
	struct work_struct	*wq_current_work;
	int			wq_flags;
	struct lwp		*wq_lwp;
	uint64_t		wq_gen;
	bool			wq_requeued:1;
	bool			wq_dying:1;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static struct workqueue_struct *
			acquire_work(struct work_struct *,
			    struct workqueue_struct *);
static void		release_work(struct work_struct *,
			    struct workqueue_struct *);
static void		queue_delayed_work_anew(struct workqueue_struct *,
			    struct delayed_work *, unsigned long);
static void		cancel_delayed_work_done(struct workqueue_struct *,
			    struct delayed_work *);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct	*system_wq __read_mostly;
struct workqueue_struct	*system_long_wq __read_mostly;
struct workqueue_struct	*system_power_efficient_wq __read_mostly;

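/*
 * linux_workqueue_init()
 *
 *	Initialize the Linux workqueue subsystem: create the lwp-specific
 *	key used by current_work() and the system-wide workqueues.
 *	Return 0 on success, an error code on failure.
 */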
int
linux_workqueue_init(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

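/*
 * linux_workqueue_fini()
 *
 *	Tear down the Linux workqueue subsystem: destroy the system
 *	workqueues and the lwp-specific key.
 */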
void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

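/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue that runs work items one at a time in a
 *	dedicated kthread named name.  flags must be zero.  Return
 *	NULL on failure.
 */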
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_lwp = NULL;
	wq->wq_gen = 0;
	wq->wq_requeued = false;
	wq->wq_dying = false;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

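/*
 * destroy_workqueue(wq)
 *
 *	Cancel all pending delayed work on wq, tell the worker thread
 *	to exit once the queue is drained, wait for it, and free wq.
 *	The caller must guarantee that no new work is queued after
 *	this is called.
 */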
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	mutex_enter(&wq->wq_lock);
	while (!TAILQ_EMPTY(&wq->wq_delayed)) {
		struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);

		KASSERT(dw->work.work_queue == wq);
		KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
			dw->dw_state == DELAYED_WORK_RESCHEDULED ||
			dw->dw_state == DELAYED_WORK_CANCELLED),
		    "delayed work %p in bad state: %d",
		    dw, dw->dw_state);

		/*
		 * Mark it cancelled and try to stop the callout before
		 * it starts.
		 *
		 * If it's too late and the callout has already begun
		 * to execute, then it will notice that we asked to
		 * cancel it and remove itself from the queue before
		 * returning.
		 *
		 * If we stopped the callout before it started,
		 * however, then we can safely destroy the callout and
		 * dissociate it from the workqueue ourselves.
		 */
		dw->dw_state = DELAYED_WORK_CANCELLED;
		if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
			cancel_delayed_work_done(wq, dw);
	}
	mutex_exit(&wq->wq_lock);

	/*
	 * At this point, no new work can be put on the queue.
	 */

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_dying);
	KASSERT(!wq->wq_requeued);
	KASSERT(wq->wq_flags == 0);
	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

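/*
 * linux_workqueue_thread(cookie)
 *
 *	Worker thread for a workqueue: repeatedly pull a batch of work
 *	off the queue and run each item, until told to die and the
 *	queue is empty.
 */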
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	TAILQ_HEAD(, work_struct) tmp;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/*
		 * Wait until there's activity.  If there's no work and
		 * we're dying, stop here.
		 */
		while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		if (TAILQ_EMPTY(&wq->wq_queue)) {
			KASSERT(wq->wq_dying);
			break;
		}

		/* Grab a batch of work off the queue.  */
		KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
		TAILQ_INIT(&tmp);
		TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

		/* Process each work item in the batch.  */
		while (!TAILQ_EMPTY(&tmp)) {
			struct work_struct *const work = TAILQ_FIRST(&tmp);

			KASSERT(work->work_queue == wq);
			TAILQ_REMOVE(&tmp, work, work_entry);
			KASSERT(wq->wq_current_work == NULL);
			wq->wq_current_work = work;

			mutex_exit(&wq->wq_lock);
			(*work->func)(work);
			mutex_enter(&wq->wq_lock);

			KASSERT(wq->wq_current_work == work);
			KASSERT(work->work_queue == wq);
			if (wq->wq_requeued)
				wq->wq_requeued = false;
			else
				release_work(work, wq);
			wq->wq_current_work = NULL;
			cv_broadcast(&wq->wq_cv);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

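/*
 * linux_workqueue_timeout(cookie)
 *
 *	Delayed work timeout callback.  When the delay has elapsed,
 *	move the work onto the run queue, unless it has been
 *	rescheduled or cancelled in the meantime.
 */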
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = dw->work.work_queue;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	KASSERT(dw->work.work_queue == wq);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		break;
	case DELAYED_WORK_RESCHEDULED:
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		break;
	case DELAYED_WORK_CANCELLED:
		cancel_delayed_work_done(wq, dw);
		/* Can't touch dw any more.  */
		goto out;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
out:	mutex_exit(&wq->wq_lock);
}

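/*
 * current_work()
 *
 *	If the calling lwp is a workqueue thread, return the work item
 *	it is currently running; otherwise return NULL.
 */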
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

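/*
 * INIT_WORK(work, fn)
 *
 *	Initialize work to call fn when it runs.  The work item is not
 *	yet associated with any workqueue.
 */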
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_queue = NULL;
	work->func = fn;
}

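/*
 * acquire_work(work, wq)
 *
 *	Try to associate work with wq.  Return NULL if work was not on
 *	any workqueue and is now owned by wq; otherwise return the
 *	workqueue it already belongs to.  Caller must hold wq's lock.
 */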
static struct workqueue_struct *
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
	struct workqueue_struct *wq0;

	KASSERT(mutex_owned(&wq->wq_lock));

	wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq);
	if (wq0 == NULL) {
		membar_enter();
		KASSERT(work->work_queue == wq);
	}

	return wq0;
}

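/*
 * release_work(work, wq)
 *
 *	Dissociate work from wq.  The work must currently belong to wq
 *	and the caller must hold wq's lock.
 */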
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

	KASSERT(work->work_queue == wq);
	KASSERT(mutex_owned(&wq->wq_lock));

	membar_exit();
	work->work_queue = NULL;
}

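/*
 * schedule_work(work)
 *
 *	Queue work to run on the system workqueue.  True if it was
 *	newly queued, false if it was already queued.
 */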
bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

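/*
 * queue_work(wq, work)
 *
 *	Queue work to run on wq.  True if it was newly queued, false if
 *	it was already on a workqueue.
 */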
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = acquire_work(work, wq)) == NULL)) {
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

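/*
 * cancel_work(work)
 *
 *	If work is queued but has not started running, remove it from
 *	the queue and return true.  Return false if it was not queued
 *	or has already started (or finished) running.
 */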
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

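/*
 * cancel_work_sync(work)
 *
 *	Like cancel_work, but if work is already running, also wait for
 *	it to complete before returning.
 */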
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (wq->wq_current_work == work);
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * Delayed work
 */

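/*
 * INIT_DELAYED_WORK(dw, fn)
 *
 *	Initialize delayed work dw to call fn when it runs.  The
 *	callout is initialized lazily, when dw is first scheduled with
 *	a nonzero delay.
 */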
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;

	/*
	 * Defer callout_init until we are going to schedule the
	 * callout, which can then callout_destroy it, because
	 * otherwise since there's no DESTROY_DELAYED_WORK or anything
	 * we have no opportunity to call callout_destroy.
	 */
}

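/*
 * schedule_delayed_work(dw, ticks)
 *
 *	Schedule dw to run on the system workqueue after ticks have
 *	elapsed.  True if it was newly scheduled, false if it was
 *	already scheduled or queued.
 */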
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}

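/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *	Schedule dw, which has already been acquired for wq, to run
 *	after ticks.  If ticks is zero, put it straight on the run
 *	queue; otherwise arm (or reuse) its callout.  Caller must hold
 *	wq's lock.
 */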
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
	    (dw->dw_state == DELAYED_WORK_SCHEDULED));

	if (ticks == 0) {
		if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		}
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		dw->dw_state = DELAYED_WORK_IDLE;
	} else {
		if (dw->dw_state == DELAYED_WORK_IDLE) {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
			    &linux_workqueue_timeout, dw);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
		}
		dw->dw_state = DELAYED_WORK_SCHEDULED;
	}
}

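/*
 * cancel_delayed_work_done(wq, dw)
 *
 *	Complete cancellation of dw: destroy its callout, remove it
 *	from the delayed queue, and dissociate it from wq.  The callout
 *	must already have been stopped, and dw may not be touched after
 *	this returns.  Caller must hold wq's lock.
 */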
static void
cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
	dw->dw_state = DELAYED_WORK_IDLE;
	callout_destroy(&dw->dw_callout);
	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	release_work(&dw->work, wq);
	/* Can't touch dw after this point.  */
}

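/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks have elapsed.  True if it
 *	was newly scheduled, false if it was already scheduled or
 *	queued.
 */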
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = acquire_work(&dw->work, wq)) == NULL)) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

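/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks, replacing any timeout it
 *	already had.  True if an existing timer was modified, false if
 *	the work had to be scheduled anew.
 */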
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if ((wq0 = acquire_work(&dw->work, wq)) == NULL) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		timer_modified = false;
	} else {
		KASSERT(wq0 == wq);
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/*
				 * Too late.  Queue it anew.  If that
				 * would skip the callout because it's
				 * immediate, notify the workqueue.
				 */
				wq->wq_requeued = ticks == 0;
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = false;
			} else {
				/* Work is queued, but hasn't started yet.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Reschedule it and tell it
				 * we've done so.
				 */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
			} else {
				/* Stopped it.  Queue it anew.  */
				queue_delayed_work_anew(wq, dw, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone modified the timer _again_, or
			 * cancelled it, after the callout started but
			 * before the poor thing even had a chance to
			 * acquire the lock.  Just reschedule it once
			 * more.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			dw->dw_state = DELAYED_WORK_RESCHEDULED;
			timer_modified = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

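/*
 * cancel_delayed_work(dw)
 *
 *	Cancel dw without waiting for it if it has already started to
 *	run.  True if we prevented it from running, false otherwise.
 */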
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  */
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, tough.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			if (!callout_stop(&dw->dw_callout))
				cancel_delayed_work_done(wq, dw);
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

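/*
 * cancel_delayed_work_sync(dw)
 *
 *	Like cancel_delayed_work, but if dw is already running, also
 *	wait for it to complete before returning.
 */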
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  Wait.  */
				do {
					cv_wait(&wq->wq_cv, &wq->wq_lock);
				} while (wq->wq_current_work == &dw->work);
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it is scheduled, mark it cancelled and
			 * try to stop the callout before it starts.
			 *
			 * If it's too late and the callout has already
			 * begun to execute, we must wait for it to
			 * complete.  But we got in soon enough to ask
			 * the callout not to run, so we successfully
			 * cancelled it in that case.
			 *
			 * If we stopped the callout before it started,
			 * however, then destroy the callout and
			 * dissociate it from the workqueue ourselves.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			cancelled_p = true;
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				cancel_delayed_work_done(wq, dw);
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * Flush
 */

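/*
 * flush_scheduled_work()
 *
 *	Wait for all work queued on the system workqueue to complete.
 */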
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

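/*
 * flush_workqueue_locked(wq)
 *
 *	Wait for the worker thread to finish its current batch of work
 *	and everything already on the queue.  Caller must hold wq's
 *	lock.
 */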
static void
flush_workqueue_locked(struct workqueue_struct *wq)
{
	uint64_t gen;

	KASSERT(mutex_owned(&wq->wq_lock));

	/* Get the current generation number.  */
	gen = wq->wq_gen;

	/*
	 * If there's a batch of work in progress, we must wait for the
	 * worker thread to finish that batch.
	 */
	if (wq->wq_current_work != NULL)
		gen++;

	/*
	 * If there's any work yet to be claimed from the queue by the
	 * worker thread, we must wait for it to finish one more batch
	 * too.
	 */
	if (!TAILQ_EMPTY(&wq->wq_queue))
		gen++;

	/* Wait until the generation number has caught up.  */
	while (wq->wq_gen < gen)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
}

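/*
 * flush_workqueue(wq)
 *
 *	Wait for all work queued on wq to complete.
 */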
void
flush_workqueue(struct workqueue_struct *wq)
{

	mutex_enter(&wq->wq_lock);
	flush_workqueue_locked(wq);
	mutex_exit(&wq->wq_lock);
}

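/*
 * flush_work(work)
 *
 *	If work is queued or running, wait for it to complete.
 *	Implemented by flushing its whole workqueue.
 */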
void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work->work_queue) == NULL)
		return;

	flush_workqueue(wq);
}

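/*
 * flush_delayed_work(dw)
 *
 *	If dw is queued or running, wait for it to complete.  If its
 *	timer has not yet fired, cancel the timer instead of waiting
 *	for it.
 */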
void
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = dw->work.work_queue) == NULL)
		return;

	mutex_enter(&wq->wq_lock);
	if (__predict_true(dw->work.work_queue == wq)) {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			/*
			 * It has a workqueue assigned and the callout
			 * is idle, so it must be in progress or on the
			 * queue.  In that case, wait for it to
			 * complete.  Waiting for the whole queue to
			 * flush is overkill, but doesn't hurt.
			 */
			flush_workqueue_locked(wq);
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * The callout is still scheduled to run.
			 * Notify it that we are cancelling, and try to
			 * stop the callout before it runs.
			 *
			 * If we do stop the callout, we are now
			 * responsible for dissociating the work from
			 * the queue.
			 *
			 * Otherwise, wait for it to complete and
			 * dissociate itself -- it will not put itself
			 * on the workqueue once it is cancelled.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				cancel_delayed_work_done(wq, dw);
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);
}