/*	$NetBSD: linux_work.c,v 1.1.24.1 2018/09/06 06:56:08 pgoyette Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.1.24.1 2018/09/06 06:56:08 pgoyette Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sdt.h>

#include <linux/workqueue.h>

TAILQ_HEAD(work_head, work_struct);
TAILQ_HEAD(dwork_head, delayed_work);

struct workqueue_struct {
        kmutex_t                wq_lock;
        kcondvar_t              wq_cv;
        struct dwork_head       wq_delayed;     /* delayed work scheduled */
        struct work_head        wq_queue;       /* work to run */
        struct work_head        wq_dqueue;      /* delayed work to run now */
        struct work_struct      *wq_current_work;
        int                     wq_flags;
        bool                    wq_dying;
        uint64_t                wq_gen;
        struct lwp              *wq_lwp;
};

static void __dead      linux_workqueue_thread(void *);
static void             linux_workqueue_timeout(void *);
static bool             work_claimed(struct work_struct *,
                            struct workqueue_struct *);
static struct workqueue_struct *
                        work_queue(struct work_struct *);
static bool             acquire_work(struct work_struct *,
                            struct workqueue_struct *);
static void             release_work(struct work_struct *,
                            struct workqueue_struct *);
static void             wait_for_current_work(struct work_struct *,
                            struct workqueue_struct *);
static void             dw_callout_init(struct workqueue_struct *,
                            struct delayed_work *);
static void             dw_callout_destroy(struct workqueue_struct *,
                            struct delayed_work *);
static void             cancel_delayed_work_done(struct workqueue_struct *,
                            struct delayed_work *);

SDT_PROBE_DEFINE2(sdt, linux, work, acquire,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, release,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, queue,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, cancel,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE3(sdt, linux, work, schedule,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/,
    "unsigned long"/*ticks*/);
SDT_PROBE_DEFINE2(sdt, linux, work, timer,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, wait__start,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, wait__done,
    "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, run,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE2(sdt, linux, work, done,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, batch__start,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, batch__done,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, flush__start,
    "struct workqueue_struct *"/*wq*/);
SDT_PROBE_DEFINE1(sdt, linux, work, flush__done,
    "struct workqueue_struct *"/*wq*/);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;

static inline uintptr_t
atomic_cas_uintptr(volatile uintptr_t *p, uintptr_t old, uintptr_t new)
{

        return (uintptr_t)atomic_cas_ptr(p, (void *)old, (void *)new);
}

/*
 * linux_workqueue_init()
 *
 *      Initialize the Linux workqueue subsystem.  Return 0 on success,
 *      NetBSD error on failure.
 */
int
linux_workqueue_init(void)
{
        int error;

        error = lwp_specific_key_create(&workqueue_key, NULL);
        if (error)
                goto fail0;

        system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
        if (system_wq == NULL) {
                error = ENOMEM;
                goto fail1;
        }

        system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
        if (system_long_wq == NULL) {
                error = ENOMEM;
                goto fail2;
        }

        system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
        if (system_power_efficient_wq == NULL) {
                error = ENOMEM;
                goto fail3;
        }

        return 0;

fail4: __unused
        destroy_workqueue(system_power_efficient_wq);
fail3:  destroy_workqueue(system_long_wq);
fail2:  destroy_workqueue(system_wq);
fail1:  lwp_specific_key_delete(workqueue_key);
fail0:  KASSERT(error);
        return error;
}

/*
 * linux_workqueue_fini()
 *
 *      Destroy the Linux workqueue subsystem.  Never fails.
 */
void
linux_workqueue_fini(void)
{

        destroy_workqueue(system_power_efficient_wq);
        destroy_workqueue(system_long_wq);
        destroy_workqueue(system_wq);
        lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

/*
 * alloc_ordered_workqueue(name, flags)
 *
 *      Create a workqueue of the given name.  No flags are currently
 *      defined.  Return NULL on failure, pointer to struct
 *      workqueue_struct object on success.
 */
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
        struct workqueue_struct *wq;
        int error;

        KASSERT(flags == 0);

        wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);

        mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
        cv_init(&wq->wq_cv, name);
        TAILQ_INIT(&wq->wq_delayed);
        TAILQ_INIT(&wq->wq_queue);
        TAILQ_INIT(&wq->wq_dqueue);
        wq->wq_current_work = NULL;
        wq->wq_flags = 0;
        wq->wq_dying = false;
        wq->wq_gen = 0;
        wq->wq_lwp = NULL;

        error = kthread_create(PRI_NONE,
            KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
            &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
        if (error)
                goto fail0;

        return wq;

fail0:  KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
        KASSERT(TAILQ_EMPTY(&wq->wq_queue));
        KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
        cv_destroy(&wq->wq_cv);
        mutex_destroy(&wq->wq_lock);
        kmem_free(wq, sizeof(*wq));
        return NULL;
}
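
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * mydrv names are hypothetical): a driver that wants its own ordered
 * queue rather than system_wq would pair alloc_ordered_workqueue with
 * destroy_workqueue in its attach and detach paths:
 *
 *      static struct workqueue_struct *mydrv_wq;
 *
 *      static int
 *      mydrv_attach(void)
 *      {
 *
 *              mydrv_wq = alloc_ordered_workqueue("mydrvwq", 0);
 *              if (mydrv_wq == NULL)
 *                      return ENOMEM;
 *              return 0;
 *      }
 *
 *      static void
 *      mydrv_detach(void)
 *      {
 *
 *              destroy_workqueue(mydrv_wq);    (waits for queued work)
 *      }
 */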

/*
 * destroy_workqueue(wq)
 *
 *      Destroy a workqueue created with alloc_ordered_workqueue.
 *      Cancel any pending delayed work.  Wait for all queued work to
 *      complete.
 *
 *      May sleep.
 */
void
destroy_workqueue(struct workqueue_struct *wq)
{

        /*
         * Cancel all delayed work.  We do this first because any
         * delayed work that has already timed out, which we can't
         * cancel, may have queued new work.
         */
        mutex_enter(&wq->wq_lock);
        while (!TAILQ_EMPTY(&wq->wq_delayed)) {
                struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);

                KASSERT(work_queue(&dw->work) == wq);
                KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
                        dw->dw_state == DELAYED_WORK_RESCHEDULED ||
                        dw->dw_state == DELAYED_WORK_CANCELLED),
                    "delayed work %p in bad state: %d",
                    dw, dw->dw_state);

                /*
                 * Mark it cancelled and try to stop the callout before
                 * it starts.
                 *
                 * If it's too late and the callout has already begun
                 * to execute, then it will notice that we asked to
                 * cancel it and remove itself from the queue before
                 * returning.
                 *
                 * If we stopped the callout before it started,
                 * however, then we can safely destroy the callout and
                 * dissociate it from the workqueue ourselves.
                 */
                SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
                dw->dw_state = DELAYED_WORK_CANCELLED;
                if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
                        cancel_delayed_work_done(wq, dw);
        }
        mutex_exit(&wq->wq_lock);

        /*
         * At this point, no new work can be put on the queue.
         */

        /* Tell the thread to exit.  */
        mutex_enter(&wq->wq_lock);
        wq->wq_dying = true;
        cv_broadcast(&wq->wq_cv);
        mutex_exit(&wq->wq_lock);

        /* Wait for it to exit.  */
        (void)kthread_join(wq->wq_lwp);

        KASSERT(wq->wq_dying);
        KASSERT(wq->wq_flags == 0);
        KASSERT(wq->wq_current_work == NULL);
        KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
        KASSERT(TAILQ_EMPTY(&wq->wq_queue));
        KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
        cv_destroy(&wq->wq_cv);
        mutex_destroy(&wq->wq_lock);

        kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

/*
 * linux_workqueue_thread(cookie)
 *
 *      Main function for a workqueue's worker thread.  Waits until
 *      there is work queued, grabs a batch of work off the queue,
 *      executes it all, bumps the generation number, and repeats,
 *      until dying.
 */
static void __dead
linux_workqueue_thread(void *cookie)
{
        struct workqueue_struct *const wq = cookie;
        struct work_head queue, dqueue;
        struct work_head *const q[2] = { &queue, &dqueue };
        unsigned i;

        lwp_setspecific(workqueue_key, wq);

        mutex_enter(&wq->wq_lock);
        for (;;) {
                /*
                 * Wait until there's activity.  If there's no work and
                 * we're dying, stop here.
                 */
                if (TAILQ_EMPTY(&wq->wq_queue) &&
                    TAILQ_EMPTY(&wq->wq_dqueue)) {
                        if (wq->wq_dying)
                                break;
                        cv_wait(&wq->wq_cv, &wq->wq_lock);
                        continue;
                }

                /* Grab a batch of work off the queue.  */
                SDT_PROBE1(sdt, linux, work, batch__start, wq);
                TAILQ_INIT(&queue);
                TAILQ_INIT(&dqueue);
                TAILQ_CONCAT(&queue, &wq->wq_queue, work_entry);
                TAILQ_CONCAT(&dqueue, &wq->wq_dqueue, work_entry);

                /* Process each work item in the batch.  */
                for (i = 0; i < 2; i++) {
                        while (!TAILQ_EMPTY(q[i])) {
                                struct work_struct *work = TAILQ_FIRST(q[i]);
                                void (*func)(struct work_struct *);

                                KASSERT(work_queue(work) == wq);
                                KASSERT(work_claimed(work, wq));
                                KASSERTMSG((q[i] != &dqueue ||
                                        container_of(work, struct delayed_work,
                                            work)->dw_state ==
                                        DELAYED_WORK_IDLE),
                                    "delayed work %p queued and scheduled",
                                    work);

                                TAILQ_REMOVE(q[i], work, work_entry);
                                KASSERT(wq->wq_current_work == NULL);
                                wq->wq_current_work = work;
                                func = work->func;
                                release_work(work, wq);
                                /* Can't dereference work after this point.  */

                                mutex_exit(&wq->wq_lock);
                                SDT_PROBE2(sdt, linux, work, run, work, wq);
                                (*func)(work);
                                SDT_PROBE2(sdt, linux, work, done, work, wq);
                                mutex_enter(&wq->wq_lock);

                                KASSERT(wq->wq_current_work == work);
                                wq->wq_current_work = NULL;
                                cv_broadcast(&wq->wq_cv);
                        }
                }

                /* Notify flush that we've completed a batch of work.  */
                wq->wq_gen++;
                cv_broadcast(&wq->wq_cv);
                SDT_PROBE1(sdt, linux, work, batch__done, wq);
        }
        mutex_exit(&wq->wq_lock);

        kthread_exit(0);
}

/*
 * linux_workqueue_timeout(cookie)
 *
 *      Delayed work timeout callback.
 *
 *      - If scheduled, queue it.
 *      - If rescheduled, callout_schedule ourselves again.
 *      - If cancelled, destroy the callout and release the work from
 *        the workqueue.
 */
static void
linux_workqueue_timeout(void *cookie)
{
        struct delayed_work *const dw = cookie;
        struct workqueue_struct *const wq = work_queue(&dw->work);

        KASSERTMSG(wq != NULL,
            "delayed work %p state %d resched %d",
            dw, dw->dw_state, dw->dw_resched);

        SDT_PROBE2(sdt, linux, work, timer, dw, wq);

        mutex_enter(&wq->wq_lock);
        KASSERT(work_queue(&dw->work) == wq);
        switch (dw->dw_state) {
        case DELAYED_WORK_IDLE:
                panic("delayed work callout uninitialized: %p", dw);
        case DELAYED_WORK_SCHEDULED:
                dw_callout_destroy(wq, dw);
                TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
                cv_broadcast(&wq->wq_cv);
                SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
                break;
        case DELAYED_WORK_RESCHEDULED:
                KASSERT(dw->dw_resched >= 0);
                callout_schedule(&dw->dw_callout, dw->dw_resched);
                dw->dw_state = DELAYED_WORK_SCHEDULED;
                dw->dw_resched = -1;
                break;
        case DELAYED_WORK_CANCELLED:
                cancel_delayed_work_done(wq, dw);
                /* Can't dereference dw after this point.  */
                goto out;
        default:
                panic("delayed work callout in bad state: %p", dw);
        }
        KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
            dw->dw_state == DELAYED_WORK_SCHEDULED);
out:    mutex_exit(&wq->wq_lock);
}

/*
 * current_work()
 *
 *      If in a workqueue worker thread, return the work it is
 *      currently executing.  Otherwise return NULL.
 */
struct work_struct *
current_work(void)
{
        struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

        /* If we're not a workqueue thread, then there's no work.  */
        if (wq == NULL)
                return NULL;

        /*
         * Otherwise, this should be possible only while work is in
         * progress.  Return the current work item.
         */
        KASSERT(wq->wq_current_work != NULL);
        return wq->wq_current_work;
}

/*
 * Work
 */

/*
 * INIT_WORK(work, fn)
 *
 *      Initialize work for use with a workqueue to call fn in a worker
 *      thread.  There is no corresponding destruction operation.
 */
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

        work->work_owner = 0;
        work->func = fn;
}
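
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * mydev names are hypothetical): embed the work_struct in a softc,
 * recover the softc with container_of in the callback, and kick the
 * work from anywhere:
 *
 *      struct mydev_softc {
 *              struct work_struct      sc_work;
 *      };
 *
 *      static void
 *      mydev_work(struct work_struct *work)
 *      {
 *              struct mydev_softc *sc =
 *                  container_of(work, struct mydev_softc, sc_work);
 *
 *              ... do the deferred processing for sc ...
 *      }
 *
 *      INIT_WORK(&sc->sc_work, &mydev_work);
 *      schedule_work(&sc->sc_work);
 */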

/*
 * work_claimed(work, wq)
 *
 *      True if work is currently claimed by a workqueue, meaning it is
 *      either on the queue or scheduled in a callout.  The workqueue
 *      must be wq, and caller must hold wq's lock.
 */
static bool
work_claimed(struct work_struct *work, struct workqueue_struct *wq)
{

        KASSERT(work_queue(work) == wq);
        KASSERT(mutex_owned(&wq->wq_lock));

        return work->work_owner & 1;
}

/*
 * work_queue(work)
 *
 *      Return the last queue that work was queued on, or NULL if it
 *      was never queued.
 */
static struct workqueue_struct *
work_queue(struct work_struct *work)
{

        return (struct workqueue_struct *)(work->work_owner & ~(uintptr_t)1);
}
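
/*
 * The work_owner word above packs the owning workqueue pointer and
 * the "claimed" flag into a single uintptr_t, relying on struct
 * workqueue_struct being at least 2-byte aligned so that bit 0 of
 * the pointer is free (acquire_work asserts this).  A sketch of the
 * encoding used by work_claimed and work_queue:
 *
 *      owner = (uintptr_t)wq | claimed;                encode
 *      wq = (struct workqueue_struct *)(owner & ~1);   decode queue
 *      claimed = owner & 1;                            decode flag
 */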

/*
 * acquire_work(work, wq)
 *
 *      Try to claim work for wq.  If work is already claimed, it must
 *      be claimed by wq; return false.  If work is not already
 *      claimed, claim it, issue a memory barrier to match any prior
 *      release_work, and return true.
 *
 *      Caller must hold wq's lock.
 */
static bool
acquire_work(struct work_struct *work, struct workqueue_struct *wq)
{
        uintptr_t owner0, owner;

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(((uintptr_t)wq & 1) == 0);

        owner = (uintptr_t)wq | 1;
        do {
                owner0 = work->work_owner;
                if (owner0 & 1) {
                        KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
                        return false;
                }
                KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
        } while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
            owner0);

        KASSERT(work_queue(work) == wq);
        membar_enter();
        SDT_PROBE2(sdt, linux, work, acquire, work, wq);
        return true;
}

/*
 * release_work(work, wq)
 *
 *      Issue a memory barrier to match any subsequent acquire_work and
 *      dissociate work from wq.
 *
 *      Caller must hold wq's lock and work must be associated with wq.
 */
static void
release_work(struct work_struct *work, struct workqueue_struct *wq)
{

        KASSERT(work_queue(work) == wq);
        KASSERT(mutex_owned(&wq->wq_lock));

        SDT_PROBE2(sdt, linux, work, release, work, wq);
        membar_exit();

        /*
         * Non-interlocked r/m/w is safe here because nobody else can
         * write to this while the claimed bit is set and the workqueue
         * lock is held.
         */
        work->work_owner &= ~(uintptr_t)1;
}

/*
 * schedule_work(work)
 *
 *      If work is not already queued on system_wq, queue it to be run
 *      by system_wq's worker thread when it next can.  True if it was
 *      newly queued, false if it was already queued.  If the work was
 *      already running, queue it to run again.
 *
 *      Caller must ensure work is not queued to run on a different
 *      workqueue.
 */
bool
schedule_work(struct work_struct *work)
{

        return queue_work(system_wq, work);
}

/*
 * queue_work(wq, work)
 *
 *      If work is not already queued on wq, queue it to be run by wq's
 *      worker thread when it next can.  True if it was newly queued,
 *      false if it was already queued.  If the work was already
 *      running, queue it to run again.
 *
 *      Caller must ensure work is not queued to run on a different
 *      workqueue.
 */
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        bool newly_queued;

        KASSERT(wq != NULL);

        mutex_enter(&wq->wq_lock);
        if (__predict_true(acquire_work(work, wq))) {
                /*
                 * It wasn't on any workqueue at all.  Put it on this
                 * one, and signal the worker thread that there is work
                 * to do.
                 */
                TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
                cv_broadcast(&wq->wq_cv);
                SDT_PROBE2(sdt, linux, work, queue, work, wq);
                newly_queued = true;
        } else {
                /*
                 * It was already on this workqueue.  Nothing to do
                 * since it is already queued.
                 */
                newly_queued = false;
        }
        mutex_exit(&wq->wq_lock);

        return newly_queued;
}
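
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * mydev names are hypothetical): since queue_work is idempotent while
 * the work is still pending, an interrupt or soft interrupt handler
 * running at or below the queue lock's IPL can post the same
 * work_struct on every event, and the handler function runs once per
 * batch rather than once per call:
 *
 *      static int
 *      mydev_intr(void *cookie)
 *      {
 *              struct mydev_softc *sc = cookie;
 *
 *              ... acknowledge the interrupt ...
 *              (void)queue_work(system_wq, &sc->sc_work);
 *              return 1;
 *      }
 */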

/*
 * cancel_work(work)
 *
 *      If work was queued, remove it from the queue and return true.
 *      If work was not queued, return false.  Work may still be
 *      running when this returns.
 */
bool
cancel_work(struct work_struct *work)
{
        struct workqueue_struct *wq;
        bool cancelled_p = false;

        /* If there's no workqueue, nothing to cancel.  */
        if ((wq = work_queue(work)) == NULL)
                goto out;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work_queue(work) != wq)) {
                /*
                 * It has finished execution or been cancelled by
                 * another thread, and has been moved off the
                 * workqueue, so it's too late to cancel.
                 */
                cancelled_p = false;
        } else {
                /* Check whether it's on the queue.  */
                if (work_claimed(work, wq)) {
                        /*
                         * It is still on the queue.  Take it off the
                         * queue and report successful cancellation.
                         */
                        TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
                        SDT_PROBE2(sdt, linux, work, cancel, work, wq);
                        release_work(work, wq);
                        /* Can't dereference work after this point.  */
                        cancelled_p = true;
                } else {
                        /* Not on the queue.  Couldn't cancel it.  */
                        cancelled_p = false;
                }
        }
        mutex_exit(&wq->wq_lock);

out:    return cancelled_p;
}

/*
 * cancel_work_sync(work)
 *
 *      If work was queued, remove it from the queue and return true.
 *      If work was not queued, return false.  Either way, if work is
 *      currently running, wait for it to complete.
 *
 *      May sleep.
 */
bool
cancel_work_sync(struct work_struct *work)
{
        struct workqueue_struct *wq;
        bool cancelled_p = false;

        /* If there's no workqueue, nothing to cancel.  */
        if ((wq = work_queue(work)) == NULL)
                goto out;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work_queue(work) != wq)) {
                /*
                 * It has finished execution or been cancelled by
                 * another thread, and has been moved off the
                 * workqueue, so it's too late to cancel.
                 */
                cancelled_p = false;
        } else {
                /* Check whether it's on the queue.  */
                if (work_claimed(work, wq)) {
                        /*
                         * It is still on the queue.  Take it off the
                         * queue and report successful cancellation.
                         */
                        TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
                        SDT_PROBE2(sdt, linux, work, cancel, work, wq);
                        release_work(work, wq);
                        /* Can't dereference work after this point.  */
                        cancelled_p = true;
                } else {
                        /* Not on the queue.  Couldn't cancel it.  */
                        cancelled_p = false;
                }
                /* If it's still running, wait for it to complete.  */
                if (wq->wq_current_work == work)
                        wait_for_current_work(work, wq);
        }
        mutex_exit(&wq->wq_lock);

out:    return cancelled_p;
}

/*
 * wait_for_current_work(work, wq)
 *
 *      wq must be currently executing work.  Wait for it to finish.
 *
 *      Does not dereference work.
 */
static void
wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
{
        uint64_t gen;

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(wq->wq_current_work == work);

        /* Wait only one generation in case it gets requeued quickly.  */
        SDT_PROBE2(sdt, linux, work, wait__start, work, wq);
        gen = wq->wq_gen;
        do {
                cv_wait(&wq->wq_cv, &wq->wq_lock);
        } while (wq->wq_current_work == work && wq->wq_gen == gen);
        SDT_PROBE2(sdt, linux, work, wait__done, work, wq);
}

/*
 * Delayed work
 */

/*
 * INIT_DELAYED_WORK(dw, fn)
 *
 *      Initialize dw for use with a workqueue to call fn in a worker
 *      thread after a delay.  There is no corresponding destruction
 *      operation.
 */
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

        INIT_WORK(&dw->work, fn);
        dw->dw_state = DELAYED_WORK_IDLE;
        dw->dw_resched = -1;

        /*
         * Defer callout_init until we are going to schedule the
         * callout, which can then callout_destroy it; since there is
         * no DESTROY_DELAYED_WORK or anything similar, we otherwise
         * have no opportunity to call callout_destroy.
         */
}
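
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * mydev names are hypothetical): a delayed work embedded in a softc
 * works like plain work, except that the callback still receives the
 * inner work_struct, so container_of must name the work member of
 * the delayed_work:
 *
 *      struct mydev_softc {
 *              struct delayed_work     sc_dwork;
 *      };
 *
 *      static void
 *      mydev_tick(struct work_struct *work)
 *      {
 *              struct mydev_softc *sc = container_of(work,
 *                  struct mydev_softc, sc_dwork.work);
 *
 *              ... periodic processing for sc ...
 *      }
 *
 *      INIT_DELAYED_WORK(&sc->sc_dwork, &mydev_tick);
 *      schedule_delayed_work(&sc->sc_dwork, hz);
 */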

/*
 * schedule_delayed_work(dw, ticks)
 *
 *      If it is not currently scheduled, schedule dw to run after
 *      ticks on system_wq.  If currently executing and not already
 *      rescheduled, reschedule it.  True if it was newly scheduled,
 *      false if it was already scheduled.
 *
 *      If ticks == 0, queue it to run as soon as the worker can,
 *      without waiting for the next callout tick to run.
 */
bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

        return queue_delayed_work(system_wq, dw, ticks);
}

/*
 * dw_callout_init(wq, dw)
 *
 *      Initialize the callout of dw and transition to
 *      DELAYED_WORK_SCHEDULED.  Caller must use callout_schedule.
 */
static void
dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
{

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(work_queue(&dw->work) == wq);
        KASSERT(dw->dw_state == DELAYED_WORK_IDLE);

        callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
        callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
        TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
        dw->dw_state = DELAYED_WORK_SCHEDULED;
}

/*
 * dw_callout_destroy(wq, dw)
 *
 *      Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
 */
static void
dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
{

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(work_queue(&dw->work) == wq);
        KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
            dw->dw_state == DELAYED_WORK_RESCHEDULED ||
            dw->dw_state == DELAYED_WORK_CANCELLED);

        TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
        callout_destroy(&dw->dw_callout);
        dw->dw_resched = -1;
        dw->dw_state = DELAYED_WORK_IDLE;
}

/*
 * cancel_delayed_work_done(wq, dw)
 *
 *      Complete cancellation of a delayed work: transition from
 *      DELAYED_WORK_CANCELLED to DELAYED_WORK_IDLE and off the
 *      workqueue.  Caller must not dereference dw after this returns.
 */
static void
cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
{

        KASSERT(mutex_owned(&wq->wq_lock));
        KASSERT(work_queue(&dw->work) == wq);
        KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);

        dw_callout_destroy(wq, dw);
        release_work(&dw->work, wq);
        /* Can't dereference dw after this point.  */
}

/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *      If it is not currently scheduled, schedule dw to run after
 *      ticks on wq.  If currently queued, remove it from the queue
 *      first.
 *
 *      If ticks == 0, queue it to run as soon as the worker can,
 *      without waiting for the next callout tick to run.
 */
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
        bool newly_queued;

        mutex_enter(&wq->wq_lock);
        if (__predict_true(acquire_work(&dw->work, wq))) {
                /*
                 * It wasn't on any workqueue at all.  Schedule it to
                 * run on this one.
                 */
                KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
                if (ticks == 0) {
                        TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
                            work_entry);
                        cv_broadcast(&wq->wq_cv);
                        SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
                } else {
                        /*
                         * Initialize a callout and schedule to run
                         * after a delay.
                         */
                        dw_callout_init(wq, dw);
                        callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
                        SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
                }
                newly_queued = true;
        } else {
                /* It was already on this workqueue.  */
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                case DELAYED_WORK_SCHEDULED:
                case DELAYED_WORK_RESCHEDULED:
                        /* On the queue or already scheduled.  Leave it.  */
                        newly_queued = false;
                        break;
                case DELAYED_WORK_CANCELLED:
                        /*
                         * Scheduled and the callout began, but it was
                         * cancelled.  Reschedule it.
                         */
                        if (ticks == 0) {
                                dw->dw_state = DELAYED_WORK_SCHEDULED;
                                SDT_PROBE2(sdt, linux, work, queue,
                                    &dw->work, wq);
                        } else {
                                dw->dw_state = DELAYED_WORK_RESCHEDULED;
                                dw->dw_resched = MIN(INT_MAX, ticks);
                                SDT_PROBE3(sdt, linux, work, schedule,
                                    dw, wq, ticks);
                        }
                        newly_queued = true;
                        break;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return newly_queued;
}
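
/*
 * Example usage (an illustrative sketch, not part of this file,
 * continuing the hypothetical mydev_tick above): a poll loop can be
 * built by having the handler reschedule itself; it stops once the
 * driver tears it down with cancel_delayed_work_sync:
 *
 *      static void
 *      mydev_tick(struct work_struct *work)
 *      {
 *              struct mydev_softc *sc = container_of(work,
 *                  struct mydev_softc, sc_dwork.work);
 *
 *              ... poll the hardware ...
 *              (void)queue_delayed_work(system_wq, &sc->sc_dwork, hz);
 *      }
 */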

/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *      Schedule dw to run after ticks.  If scheduled or queued,
 *      reschedule.  If ticks == 0, run without delay.
 *
 *      True if it modified the timer of an already scheduled work,
 *      false if it newly scheduled the work.
 */
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
        bool timer_modified;

        mutex_enter(&wq->wq_lock);
        if (acquire_work(&dw->work, wq)) {
                /*
                 * It wasn't on any workqueue at all.  Schedule it to
                 * run on this one.
                 */
                KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
                if (ticks == 0) {
                        /*
                         * Run immediately: put it on the queue and
                         * signal the worker thread.
                         */
                        TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
                            work_entry);
                        cv_broadcast(&wq->wq_cv);
                        SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
                } else {
                        /*
                         * Initialize a callout and schedule to run
                         * after a delay.
                         */
                        dw_callout_init(wq, dw);
                        callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
                        SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
                }
                timer_modified = false;
        } else {
                /* It was already on this workqueue.  */
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        /* On the queue.  */
                        if (ticks == 0) {
                                /* Leave it be.  */
                                SDT_PROBE2(sdt, linux, work, cancel,
                                    &dw->work, wq);
                                SDT_PROBE2(sdt, linux, work, queue,
                                    &dw->work, wq);
                        } else {
                                /* Remove from the queue and schedule.  */
                                TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
                                    work_entry);
                                dw_callout_init(wq, dw);
                                callout_schedule(&dw->dw_callout,
                                    MIN(INT_MAX, ticks));
                                SDT_PROBE2(sdt, linux, work, cancel,
                                    &dw->work, wq);
                                SDT_PROBE3(sdt, linux, work, schedule,
                                    dw, wq, ticks);
                        }
                        timer_modified = true;
                        break;
                case DELAYED_WORK_SCHEDULED:
                        /*
                         * It is scheduled to run after a delay.  Try
                         * to stop it and reschedule it; if we can't,
                         * either reschedule it or cancel it to put it
                         * on the queue, and inform the callout.
                         */
                        if (callout_stop(&dw->dw_callout)) {
                                /* Can't stop, callout has begun.  */
                                if (ticks == 0) {
                                        /*
                                         * We don't actually need to do
                                         * anything.  The callout will
                                         * queue it as soon as it gets
                                         * the lock.
                                         */
                                        SDT_PROBE2(sdt, linux, work, cancel,
                                            &dw->work, wq);
                                        SDT_PROBE2(sdt, linux, work, queue,
                                            &dw->work, wq);
                                } else {
                                        /* Ask the callout to reschedule.  */
                                        dw->dw_state = DELAYED_WORK_RESCHEDULED;
                                        dw->dw_resched = MIN(INT_MAX, ticks);
                                        SDT_PROBE2(sdt, linux, work, cancel,
                                            &dw->work, wq);
                                        SDT_PROBE3(sdt, linux, work, schedule,
                                            dw, wq, ticks);
                                }
                        } else {
                                /* We stopped the callout before it began.  */
                                if (ticks == 0) {
                                        /*
                                         * Run immediately: destroy the
                                         * callout, put it on the
                                         * queue, and signal the worker
                                         * thread.
                                         */
                                        dw_callout_destroy(wq, dw);
                                        TAILQ_INSERT_TAIL(&wq->wq_dqueue,
                                            &dw->work, work_entry);
                                        cv_broadcast(&wq->wq_cv);
                                        SDT_PROBE2(sdt, linux, work, cancel,
                                            &dw->work, wq);
                                        SDT_PROBE2(sdt, linux, work, queue,
                                            &dw->work, wq);
                                } else {
                                        /*
                                         * Reschedule the callout.  No
                                         * state change.
                                         */
                                        callout_schedule(&dw->dw_callout,
                                            MIN(INT_MAX, ticks));
                                        SDT_PROBE2(sdt, linux, work, cancel,
                                            &dw->work, wq);
                                        SDT_PROBE3(sdt, linux, work, schedule,
                                            dw, wq, ticks);
                                }
                        }
                        timer_modified = true;
                        break;
                case DELAYED_WORK_RESCHEDULED:
                        /*
                         * Someone rescheduled it after the callout
                         * started but before the poor thing even had a
                         * chance to acquire the lock.
                         */
                        if (ticks == 0) {
                                /*
                                 * We can just switch back to
                                 * DELAYED_WORK_SCHEDULED so that the
                                 * callout will queue the work as soon
                                 * as it gets the lock.
                                 */
                                dw->dw_state = DELAYED_WORK_SCHEDULED;
                                dw->dw_resched = -1;
                                SDT_PROBE2(sdt, linux, work, cancel,
                                    &dw->work, wq);
                                SDT_PROBE2(sdt, linux, work, queue,
                                    &dw->work, wq);
                        } else {
                                /* Change the rescheduled time.  */
                                dw->dw_resched = MIN(INT_MAX, ticks);
                                SDT_PROBE2(sdt, linux, work, cancel,
                                    &dw->work, wq);
                                SDT_PROBE3(sdt, linux, work, schedule,
                                    dw, wq, ticks);
                        }
                        timer_modified = true;
                        break;
                case DELAYED_WORK_CANCELLED:
                        /*
                         * Someone cancelled it after the callout
                         * started but before the poor thing even had a
                         * chance to acquire the lock.
                         */
                        if (ticks == 0) {
                                /*
                                 * We can just switch back to
                                 * DELAYED_WORK_SCHEDULED so that the
                                 * callout will queue the work as soon
                                 * as it gets the lock.
                                 */
                                dw->dw_state = DELAYED_WORK_SCHEDULED;
                                SDT_PROBE2(sdt, linux, work, queue,
                                    &dw->work, wq);
                        } else {
                                /* Ask it to reschedule.  */
                                dw->dw_state = DELAYED_WORK_RESCHEDULED;
                                dw->dw_resched = MIN(INT_MAX, ticks);
                                SDT_PROBE3(sdt, linux, work, schedule,
                                    dw, wq, ticks);
                        }
                        timer_modified = false;
                        break;
                default:
                        panic("invalid delayed work state: %d", dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return timer_modified;
}
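
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * mydev names are hypothetical): mod_delayed_work suits
 * watchdog-style timeouts, where each event pushes the deadline back
 * instead of queueing additional work:
 *
 *      static void
 *      mydev_activity(struct mydev_softc *sc)
 *      {
 *
 *              ... handle the activity ...
 *              (void)mod_delayed_work(system_wq, &sc->sc_dwork, 5 * hz);
 *      }
 */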

/*
 * cancel_delayed_work(dw)
 *
 *      If work was scheduled or queued, remove it from the schedule or
 *      queue and return true.  If work was not scheduled or queued,
 *      return false.  Note that work may already be running; if it
 *      hasn't been rescheduled or requeued, then cancel_delayed_work
 *      will return false, and either way, cancel_delayed_work will NOT
 *      wait for the work to complete.
 */
bool
cancel_delayed_work(struct delayed_work *dw)
{
        struct workqueue_struct *wq;
        bool cancelled_p;

        /* If there's no workqueue, nothing to cancel.  */
        if ((wq = work_queue(&dw->work)) == NULL)
                return false;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work_queue(&dw->work) != wq)) {
                cancelled_p = false;
        } else {
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        /*
                         * It is either on the queue or already running
                         * or both.
                         */
                        if (work_claimed(&dw->work, wq)) {
                                /* On the queue.  Remove and release.  */
                                TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
                                    work_entry);
                                SDT_PROBE2(sdt, linux, work, cancel,
                                    &dw->work, wq);
                                release_work(&dw->work, wq);
                                /* Can't dereference dw after this point.  */
                                cancelled_p = true;
                        } else {
                                /* Not on the queue, so didn't cancel.  */
                                cancelled_p = false;
                        }
                        break;
                case DELAYED_WORK_SCHEDULED:
                        /*
                         * If it is scheduled, mark it cancelled and
                         * try to stop the callout before it starts.
                         *
                         * If it's too late and the callout has already
                         * begun to execute, tough.
                         *
                         * If we stopped the callout before it started,
                         * however, then destroy the callout and
                         * dissociate it from the workqueue ourselves.
                         */
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        cancelled_p = true;
                        SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
                        if (!callout_stop(&dw->dw_callout))
                                cancel_delayed_work_done(wq, dw);
                        break;
                case DELAYED_WORK_RESCHEDULED:
                        /*
                         * If it is being rescheduled, the callout has
                         * already fired.  We must ask it to cancel.
                         */
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        dw->dw_resched = -1;
                        cancelled_p = true;
                        SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
                        break;
                case DELAYED_WORK_CANCELLED:
                        /*
                         * If it is being cancelled, the callout has
                         * already fired.  There is nothing more for us
                         * to do.  Someone else claims credit for
                         * cancelling it.
                         */
                        cancelled_p = false;
                        break;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return cancelled_p;
}

/*
 * cancel_delayed_work_sync(dw)
 *
 *      If work was scheduled or queued, remove it from the schedule or
 *      queue and return true.  If work was not scheduled or queued,
 *      return false.  Note that work may already be running; if it
 *      hasn't been rescheduled or requeued, then cancel_delayed_work
 *      will return false; either way, wait for it to complete.
 */
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
        struct workqueue_struct *wq;
        bool cancelled_p;

        /* If there's no workqueue, nothing to cancel.  */
        if ((wq = work_queue(&dw->work)) == NULL)
                return false;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work_queue(&dw->work) != wq)) {
                cancelled_p = false;
        } else {
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        /*
                         * It is either on the queue or already running
                         * or both.
                         */
                        if (work_claimed(&dw->work, wq)) {
                                /* On the queue.  Remove and release.  */
                                TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
                                    work_entry);
                                SDT_PROBE2(sdt, linux, work, cancel,
                                    &dw->work, wq);
                                release_work(&dw->work, wq);
                                /* Can't dereference dw after this point.  */
                                cancelled_p = true;
                        } else {
                                /* Not on the queue, so didn't cancel.  */
                                cancelled_p = false;
                        }
                        /* If it's still running, wait for it to complete.  */
                        if (wq->wq_current_work == &dw->work)
                                wait_for_current_work(&dw->work, wq);
                        break;
                case DELAYED_WORK_SCHEDULED:
                        /*
                         * If it is scheduled, mark it cancelled and
                         * try to stop the callout before it starts.
                         *
                         * If it's too late and the callout has already
                         * begun to execute, we must wait for it to
                         * complete.  But we got in soon enough to ask
                         * the callout not to run, so we successfully
                         * cancelled it in that case.
                         *
                         * If we stopped the callout before it started,
                         * then we must destroy the callout and
                         * dissociate it from the workqueue ourselves.
                         */
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
                        if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
                                cancel_delayed_work_done(wq, dw);
                        cancelled_p = true;
                        break;
                case DELAYED_WORK_RESCHEDULED:
                        /*
                         * If it is being rescheduled, the callout has
                         * already fired.  We must ask it to cancel and
                         * wait for it to complete.
                         */
                        dw->dw_state = DELAYED_WORK_CANCELLED;
                        dw->dw_resched = -1;
                        SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
                        (void)callout_halt(&dw->dw_callout, &wq->wq_lock);
                        cancelled_p = true;
                        break;
                case DELAYED_WORK_CANCELLED:
                        /*
                         * If it is being cancelled, the callout has
                         * already fired.  We need only wait for it to
                         * complete.  Someone else, however, claims
                         * credit for cancelling it.
                         */
                        (void)callout_halt(&dw->dw_callout, &wq->wq_lock);
                        cancelled_p = false;
                        break;
                default:
                        panic("invalid delayed work state: %d",
                            dw->dw_state);
                }
        }
        mutex_exit(&wq->wq_lock);

        return cancelled_p;
}
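
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * mydev names are hypothetical): on detach, cancel synchronously
 * before freeing the containing structure, so that no callback can
 * run against freed memory:
 *
 *      static void
 *      mydev_detach(struct mydev_softc *sc)
 *      {
 *
 *              (void)cancel_delayed_work_sync(&sc->sc_dwork);
 *              (void)cancel_work_sync(&sc->sc_work);
 *              kmem_free(sc, sizeof(*sc));
 *      }
 */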

/*
 * Flush
 */

/*
 * flush_scheduled_work()
 *
 *      Wait for all work queued on system_wq to complete.  This does
 *      not include delayed work.
 */
void
flush_scheduled_work(void)
{

        flush_workqueue(system_wq);
}

/*
 * flush_workqueue_locked(wq)
 *
 *      Wait for all work queued on wq to complete.  This does not
 *      include delayed work.
 *
 *      Caller must hold wq's lock.
 */
static void
flush_workqueue_locked(struct workqueue_struct *wq)
{
        uint64_t gen;

        KASSERT(mutex_owned(&wq->wq_lock));

        /* Get the current generation number.  */
        gen = wq->wq_gen;

        /*
         * If there's a batch of work in progress, we must wait for the
         * worker thread to finish that batch.
         */
        if (wq->wq_current_work != NULL)
                gen++;

        /*
         * If there's any work yet to be claimed from the queue by the
         * worker thread, we must wait for it to finish one more batch
         * too.
         */
        if (!TAILQ_EMPTY(&wq->wq_queue) || !TAILQ_EMPTY(&wq->wq_dqueue))
                gen++;

        /* Wait until the generation number has caught up.  */
        SDT_PROBE1(sdt, linux, work, flush__start, wq);
        while (wq->wq_gen < gen)
                cv_wait(&wq->wq_cv, &wq->wq_lock);
        SDT_PROBE1(sdt, linux, work, flush__done, wq);
}

/*
 * flush_workqueue(wq)
 *
 *      Wait for all work queued on wq to complete.  This does not
 *      include delayed work.
 */
void
flush_workqueue(struct workqueue_struct *wq)
{

        mutex_enter(&wq->wq_lock);
        flush_workqueue_locked(wq);
        mutex_exit(&wq->wq_lock);
}

/*
 * flush_work(work)
 *
 *      If work is queued or currently executing, wait for it to
 *      complete.
 */
void
flush_work(struct work_struct *work)
{
        struct workqueue_struct *wq;

        /* If there's no workqueue, nothing to flush.  */
        if ((wq = work_queue(work)) == NULL)
                return;

        flush_workqueue(wq);
}

/*
 * flush_delayed_work(dw)
 *
 *      If dw is scheduled to run after a delay, queue it immediately
 *      instead.  Then, if dw is queued or currently executing, wait
 *      for it to complete.
 */
void
flush_delayed_work(struct delayed_work *dw)
{
        struct workqueue_struct *wq;

        /* If there's no workqueue, nothing to flush.  */
        if ((wq = work_queue(&dw->work)) == NULL)
                return;

        mutex_enter(&wq->wq_lock);
        if (__predict_false(work_queue(&dw->work) != wq)) {
                /*
                 * Moved off the queue already (and possibly to another
                 * queue, though that would be ill-advised), so it must
                 * have completed, and we have nothing more to do.
                 */
        } else {
                switch (dw->dw_state) {
                case DELAYED_WORK_IDLE:
                        /*
                         * It has a workqueue assigned and the callout
                         * is idle, so it must be in progress or on the
                         * queue.  In that case, we'll wait for it to
                         * complete.
                         */
                        break;
                case DELAYED_WORK_SCHEDULED:
                case DELAYED_WORK_RESCHEDULED:
                case DELAYED_WORK_CANCELLED:
                        /*
                         * The callout is scheduled, and may have even
                         * started.  Mark it as scheduled so that if
                         * the callout has fired it will queue the work
                         * itself.  Try to stop the callout -- if we
                         * can, queue the work now; if we can't, wait
                         * for the callout to complete, which entails
                         * queueing it.
                         */
                        dw->dw_state = DELAYED_WORK_SCHEDULED;
                        if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
                                /*
                                 * We stopped it before it ran.  No
                                 * state change in the interim is
                                 * possible.  Destroy the callout and
                                 * queue it ourselves.
                                 */
                                KASSERT(dw->dw_state ==
                                    DELAYED_WORK_SCHEDULED);
                                dw_callout_destroy(wq, dw);
                                TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
                                    work_entry);
                                cv_broadcast(&wq->wq_cv);
                                SDT_PROBE2(sdt, linux, work, queue,
                                    &dw->work, wq);
                        }
                        break;
                default:
                        panic("invalid delayed work state: %d", dw->dw_state);
                }
                /*
                 * Waiting for the whole queue to flush is overkill,
                 * but doesn't hurt.
                 */
                flush_workqueue_locked(wq);
        }
        mutex_exit(&wq->wq_lock);
}