
Lines Matching defs:wq

90 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
92 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
94 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
96 "struct rcu_work *"/*work*/, "struct workqueue_struct *"/*wq*/);
98 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
100 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/,
103 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
105 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
107 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
109 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
111 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
113 "struct workqueue_struct *"/*wq*/);
115 "struct workqueue_struct *"/*wq*/);
117 "struct workqueue_struct *"/*wq*/);
119 "struct workqueue_struct *"/*wq*/);
121 "struct workqueue_struct *"/*wq*/);
259 struct workqueue_struct *wq;
264 wq = kmem_zalloc(sizeof(*wq), KM_SLEEP);
266 mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
267 cv_init(&wq->wq_cv, name);
268 TAILQ_INIT(&wq->wq_delayed);
269 TAILQ_INIT(&wq->wq_rcu);
270 TAILQ_INIT(&wq->wq_queue);
271 TAILQ_INIT(&wq->wq_dqueue);
272 wq->wq_current_work = NULL;
273 wq->wq_flags = 0;
274 wq->wq_dying = false;
275 wq->wq_gen = 0;
276 wq->wq_lwp = NULL;
277 wq->wq_name = name;
281 &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
285 return wq;
287 fail0: KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
288 KASSERT(TAILQ_EMPTY(&wq->wq_queue));
289 KASSERT(TAILQ_EMPTY(&wq->wq_rcu));
290 KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
291 cv_destroy(&wq->wq_cv);
292 mutex_destroy(&wq->wq_lock);
293 kmem_free(wq, sizeof(*wq));
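The block above is the allocation and worker-thread setup behind the Linux-style workqueue constructors. A minimal caller-side sketch follows, assuming the usual compat entry points from <linux/workqueue.h> (alloc_workqueue(), INIT_WORK()); the names example_wq, example_work, and example_task are illustrative and do not come from these matches:

    #include <sys/errno.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;
    static struct work_struct example_work;

    static void
    example_task(struct work_struct *work)
    {
            /* Runs on example_wq's single worker thread, so work items
               queued on the same wq never run concurrently with each
               other. */
    }

    static int
    example_attach(void)
    {
            /* flags and max_active follow the Linux API; 0, 0 asks for
               the defaults. */
            example_wq = alloc_workqueue("example", 0, 0);
            if (example_wq == NULL)
                    return ENOMEM;
            INIT_WORK(&example_work, example_task);
            return 0;
    }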
310 * destroy_workqueue(wq)
312 Destroy the workqueue wq. Cancel any pending
318 destroy_workqueue(struct workqueue_struct *wq)
326 mutex_enter(&wq->wq_lock);
327 while (!TAILQ_EMPTY(&wq->wq_delayed)) {
328 struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);
330 KASSERT(work_queue(&dw->work) == wq);
350 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
352 if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
353 cancel_delayed_work_done(wq, dw);
355 mutex_exit(&wq->wq_lock);
358 mutex_enter(&wq->wq_lock);
359 while (!TAILQ_EMPTY(&wq->wq_rcu))
360 cv_wait(&wq->wq_cv, &wq->wq_lock);
361 mutex_exit(&wq->wq_lock);
368 mutex_enter(&wq->wq_lock);
369 wq->wq_dying = true;
370 cv_broadcast(&wq->wq_cv);
371 mutex_exit(&wq->wq_lock);
374 (void)kthread_join(wq->wq_lwp);
376 KASSERT(wq->wq_dying);
377 KASSERT(wq->wq_flags == 0);
378 KASSERT(wq->wq_current_work == NULL);
379 KASSERT(TAILQ_EMPTY(&wq->wq_dqueue));
380 KASSERT(TAILQ_EMPTY(&wq->wq_queue));
381 KASSERT(TAILQ_EMPTY(&wq->wq_rcu));
382 KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
383 cv_destroy(&wq->wq_cv);
384 mutex_destroy(&wq->wq_lock);
386 kmem_free(wq, sizeof(*wq));
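Teardown mirrors destroy_workqueue() above: stop queueing, cancel anything outstanding, then destroy. A hedged sketch reusing the illustrative names from the previous block; cancel_work_sync() is assumed from the Linux API and is not named in these matches:

    static void
    example_detach(void)
    {
            /* destroy_workqueue() itself cancels pending delayed work
               and waits for queued work, but cancelling explicitly
               first keeps the shutdown order obvious. */
            cancel_work_sync(&example_work);
            destroy_workqueue(example_wq);
            example_wq = NULL;
    }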
405 struct workqueue_struct *const wq = cookie;
406 struct work_head *const q[2] = { &wq->wq_queue, &wq->wq_dqueue };
410 lwp_setspecific(workqueue_key, wq);
412 mutex_enter(&wq->wq_lock);
418 if (TAILQ_EMPTY(&wq->wq_queue) &&
419 TAILQ_EMPTY(&wq->wq_dqueue)) {
420 if (wq->wq_dying)
422 cv_wait(&wq->wq_cv, &wq->wq_lock);
431 SDT_PROBE1(sdt, linux, work, batch__start, wq);
439 KASSERT(work_queue(work) == wq);
440 KASSERT(work_claimed(work, wq));
441 KASSERTMSG((q[i] != &wq->wq_dqueue ||
449 KASSERT(wq->wq_current_work == NULL);
450 wq->wq_current_work = work;
452 release_work(work, wq);
455 mutex_exit(&wq->wq_lock);
456 SDT_PROBE2(sdt, linux, work, run, work, wq);
458 SDT_PROBE2(sdt, linux, work, done, work, wq);
459 mutex_enter(&wq->wq_lock);
461 KASSERT(wq->wq_current_work == work);
462 wq->wq_current_work = NULL;
463 cv_broadcast(&wq->wq_cv);
469 wq->wq_gen++;
470 cv_broadcast(&wq->wq_cv);
471 SDT_PROBE1(sdt, linux, work, batch__done, wq);
473 mutex_exit(&wq->wq_lock);
492 struct workqueue_struct *const wq = work_queue(&dw->work);
494 KASSERTMSG(wq != NULL,
498 SDT_PROBE2(sdt, linux, work, timer, dw, wq);
500 mutex_enter(&wq->wq_lock);
501 KASSERT(work_queue(&dw->work) == wq);
506 dw_callout_destroy(wq, dw);
507 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
508 cv_broadcast(&wq->wq_cv);
509 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
518 cancel_delayed_work_done(wq, dw);
526 out: mutex_exit(&wq->wq_lock);
538 struct workqueue_struct *wq = lwp_getspecific(workqueue_key);
541 if (wq == NULL)
548 KASSERT(wq->wq_current_work != NULL);
549 return wq->wq_current_work;
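The accessor above appears to be the Linux-style current_work(): it returns the work item the calling worker thread is executing, or NULL anywhere else. A small sketch, with an illustrative helper name:

    static bool
    example_on_worker(void)
    {
            /* Non-NULL only when called from a work handler that some
               workqueue's worker thread is currently executing. */
            return current_work() != NULL;
    }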
572 * work_claimed(work, wq)
576 * must be wq, and caller must hold wq's lock.
579 work_claimed(struct work_struct *work, struct workqueue_struct *wq)
582 KASSERT(work_queue(work) == wq);
583 KASSERT(mutex_owned(&wq->wq_lock));
616 * acquire_work(work, wq)
618 * Try to claim work for wq. If work is already claimed, it must
619 * be claimed by wq; return false. If work is not already
623 * Caller must hold wq's lock.
626 acquire_work(struct work_struct *work, struct workqueue_struct *wq)
630 KASSERT(mutex_owned(&wq->wq_lock));
631 KASSERT(((uintptr_t)wq & 1) == 0);
633 owner = (uintptr_t)wq | 1;
637 KASSERT((owner0 & ~(uintptr_t)1) == (uintptr_t)wq);
640 KASSERT(owner0 == (uintptr_t)NULL || owner0 == (uintptr_t)wq);
644 KASSERT(work_queue(work) == wq);
646 SDT_PROBE2(sdt, linux, work, acquire, work, wq);
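The assertions above pin down the claim protocol: a work item's owner word holds the workqueue pointer, with the low bit set while the work is claimed (queued or scheduled). A sketch of that encoding; the helper names are illustrative, not the header's:

    #include <sys/types.h>

    static inline struct workqueue_struct *
    owner_to_wq(uintptr_t owner)
    {
            /* Strip the "claimed" tag bit to recover the queue. */
            return (struct workqueue_struct *)(owner & ~(uintptr_t)1);
    }

    static inline bool
    owner_claimed(uintptr_t owner)
    {
            /* Low bit set means some workqueue currently owns the work. */
            return (owner & 1) != 0;
    }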
651 * release_work(work, wq)
654 * dissociate work from wq.
656 * Caller must hold wq's lock and work must be associated with wq.
659 release_work(struct work_struct *work, struct workqueue_struct *wq)
662 KASSERT(work_queue(work) == wq);
663 KASSERT(mutex_owned(&wq->wq_lock));
665 SDT_PROBE2(sdt, linux, work, release, work, wq);
696 * queue_work(wq, work)
698 * If work is not already queued on wq, queue it to be run by wq's
707 queue_work(struct workqueue_struct *wq, struct work_struct *work)
711 KASSERT(wq != NULL);
713 mutex_enter(&wq->wq_lock);
714 if (__predict_true(acquire_work(work, wq))) {
720 TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
721 cv_broadcast(&wq->wq_cv);
722 SDT_PROBE2(sdt, linux, work, queue, work, wq);
731 mutex_exit(&wq->wq_lock);
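queue_work() takes the queue lock, claims the work, appends it, and wakes the worker; as in the Linux API it reports whether the work was newly queued. A caller-side sketch using the earlier illustrative names:

    static void
    example_kick(void)
    {
            if (queue_work(example_wq, &example_work)) {
                    /* Newly queued; example_task will run soon. */
            } else {
                    /* Already queued; the pending run will observe the
                       caller's latest state, so nothing more to do. */
            }
    }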
746 struct workqueue_struct *wq;
750 if ((wq = work_queue(work)) == NULL)
753 mutex_enter(&wq->wq_lock);
754 if (__predict_false(work_queue(work) != wq)) {
763 if (work_claimed(work, wq)) {
768 TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
769 SDT_PROBE2(sdt, linux, work, cancel, work, wq);
770 release_work(work, wq);
778 mutex_exit(&wq->wq_lock);
795 struct workqueue_struct *wq;
799 if ((wq = work_queue(work)) == NULL)
802 mutex_enter(&wq->wq_lock);
803 if (__predict_false(work_queue(work) != wq)) {
812 if (work_claimed(work, wq)) {
817 TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
818 SDT_PROBE2(sdt, linux, work, cancel, work, wq);
819 release_work(work, wq);
827 if (wq->wq_current_work == work)
828 wait_for_current_work(work, wq);
830 mutex_exit(&wq->wq_lock);
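The function ending above is presumably the Linux-style cancel_work_sync(), given the wait on wq_current_work: it dequeues the work and also waits out a handler that is already running. A sketch of the usual caveat:

    /* From ordinary thread context that may sleep: afterwards the
       handler is neither queued nor running.  Calling this from
       example_task itself on the same queue risks deadlock in this
       one-thread-per-queue implementation, since the worker would be
       waiting on its own completion. */
    (void)cancel_work_sync(&example_work);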
836 * wait_for_current_work(work, wq)
838 * wq must be currently executing work. Wait for it to finish.
843 wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
847 KASSERT(mutex_owned(&wq->wq_lock));
848 KASSERT(wq->wq_current_work == work);
851 SDT_PROBE2(sdt, linux, work, wait__start, work, wq);
852 gen = wq->wq_gen;
854 cv_wait(&wq->wq_cv, &wq->wq_lock);
855 } while (wq->wq_current_work == work && wq->wq_gen == gen);
856 SDT_PROBE2(sdt, linux, work, wait__done, work, wq);
906 * dw_callout_init(wq, dw)
912 dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
915 KASSERT(mutex_owned(&wq->wq_lock));
916 KASSERT(work_queue(&dw->work) == wq);
921 TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
926 * dw_callout_destroy(wq, dw)
931 dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
934 KASSERT(mutex_owned(&wq->wq_lock));
935 KASSERT(work_queue(&dw->work) == wq);
940 TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
947 * cancel_delayed_work_done(wq, dw)
954 cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
957 KASSERT(mutex_owned(&wq->wq_lock));
958 KASSERT(work_queue(&dw->work) == wq);
961 dw_callout_destroy(wq, dw);
962 release_work(&dw->work, wq);
967 * queue_delayed_work(wq, dw, ticks)
970 * ticks on wq. If currently queued, remove it from the queue
977 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
982 mutex_enter(&wq->wq_lock);
983 if (__predict_true(acquire_work(&dw->work, wq))) {
990 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
992 cv_broadcast(&wq->wq_cv);
993 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
999 dw_callout_init(wq, dw);
1001 SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
1021 &dw->work, wq);
1026 dw, wq, ticks);
1035 mutex_exit(&wq->wq_lock);
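queue_delayed_work() either queues the work immediately or arms a callout via dw_callout_init(); the third argument is a tick count. A caller-side sketch, assuming the Linux-style INIT_DELAYED_WORK() and an illustrative example_dwork:

    static struct delayed_work example_dwork;   /* INIT_DELAYED_WORK()'d at attach */

    static void
    example_arm(void)
    {
            /* Run example_task about one second from now (hz ticks).
               If the delayed work is already scheduled or queued, the
               earlier request stands and this returns false. */
            (void)queue_delayed_work(example_wq, &example_dwork, hz);
    }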
1041 * mod_delayed_work(wq, dw, ticks)
1050 mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
1055 mutex_enter(&wq->wq_lock);
1056 if (acquire_work(&dw->work, wq)) {
1067 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1069 cv_broadcast(&wq->wq_cv);
1070 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
1076 dw_callout_init(wq, dw);
1078 SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
1089 &dw->work, wq);
1091 &dw->work, wq);
1094 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1096 dw_callout_init(wq, dw);
1100 &dw->work, wq);
1102 dw, wq, ticks);
1123 &dw->work, wq);
1125 &dw->work, wq);
1131 &dw->work, wq);
1133 dw, wq, ticks);
1144 dw_callout_destroy(wq, dw);
1145 TAILQ_INSERT_TAIL(&wq->wq_dqueue,
1147 cv_broadcast(&wq->wq_cv);
1149 &dw->work, wq);
1151 &dw->work, wq);
1160 &dw->work, wq);
1162 dw, wq, ticks);
1183 &dw->work, wq);
1185 &dw->work, wq);
1190 &dw->work, wq);
1192 dw, wq, ticks);
1211 &dw->work, wq);
1217 dw, wq, ticks);
1225 mutex_exit(&wq->wq_lock);
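mod_delayed_work() is the heavyweight variant: the long case analysis above reschedules to the new delay whether the delayed work is idle, armed in the callout, already queued, or being cancelled. A short sketch with the example_dwork from the previous block:

    /* Push the timer out (or pull it in) to five seconds from now,
       regardless of example_dwork's current state. */
    mod_delayed_work(example_wq, &example_dwork, 5 * hz);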
1243 struct workqueue_struct *wq;
1247 if ((wq = work_queue(&dw->work)) == NULL)
1250 mutex_enter(&wq->wq_lock);
1251 if (__predict_false(work_queue(&dw->work) != wq)) {
1260 if (work_claimed(&dw->work, wq)) {
1262 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1265 &dw->work, wq);
1266 release_work(&dw->work, wq);
1288 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1290 cancel_delayed_work_done(wq, dw);
1300 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1316 mutex_exit(&wq->wq_lock);
1333 struct workqueue_struct *wq;
1337 if ((wq = work_queue(&dw->work)) == NULL)
1340 mutex_enter(&wq->wq_lock);
1341 if (__predict_false(work_queue(&dw->work) != wq)) {
1350 if (work_claimed(&dw->work, wq)) {
1352 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1355 &dw->work, wq);
1356 release_work(&dw->work, wq);
1364 if (wq->wq_current_work == &dw->work)
1365 wait_for_current_work(&dw->work, wq);
1383 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1384 if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
1385 cancel_delayed_work_done(wq, dw);
1396 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1397 (void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1407 (void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1415 mutex_exit(&wq->wq_lock);
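The two blocks above look like the Linux-style cancel_delayed_work() and cancel_delayed_work_sync(); the names do not appear in these matches, so treat the sketch below as an assumption about the exported API:

    /* Plain cancel: stop a pending timer or dequeue the work; a handler
       that is already running keeps running. */
    (void)cancel_delayed_work(&example_dwork);

    /* Sync cancel: additionally halts the callout under the queue lock
       and waits for a running handler, so it may sleep and should not
       be called from example_task itself. */
    (void)cancel_delayed_work_sync(&example_dwork);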
1457 * flush_workqueue(wq)
1459 * Wait for all work queued on wq to complete. This does not
1463 flush_workqueue(struct workqueue_struct *wq)
1467 if (lwp_getspecific(workqueue_key) == wq) {
1468 SDT_PROBE1(sdt, linux, work, flush__self, wq);
1477 SDT_PROBE1(sdt, linux, work, flush__start, wq);
1478 queue_work(wq, &fw.fw_work);
1484 SDT_PROBE1(sdt, linux, work, flush__done, wq);
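flush_workqueue() queues a marker work item and waits for it, so everything queued before the call has completed by the time it returns; flushing from the workqueue's own worker thread is special-cased (the flush__self probe). A sketch with the earlier names:

    static void
    example_settle(void)
    {
            /* Everything queued on example_wq before this point has
               finished once flush_workqueue() returns; work queued
               afterwards is not waited for. */
            queue_work(example_wq, &example_work);
            flush_workqueue(example_wq);
    }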
1493 * drain_workqueue(wq)
1495 * Repeatedly flush wq until there is no more work.
1498 drain_workqueue(struct workqueue_struct *wq)
1507 wq->wq_name, ntries);
1508 flush_workqueue(wq);
1509 mutex_enter(&wq->wq_lock);
1510 done = wq->wq_current_work == NULL;
1511 done &= TAILQ_EMPTY(&wq->wq_queue);
1512 done &= TAILQ_EMPTY(&wq->wq_dqueue);
1513 mutex_exit(&wq->wq_lock);
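drain_workqueue() loops flush_workqueue(), warning by wq_name after repeated tries, until both queues are empty and no handler is running. A teardown-oriented sketch:

    /* Once nothing can queue new work on example_wq any more: */
    drain_workqueue(example_wq);
    destroy_workqueue(example_wq);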
1529 struct workqueue_struct *wq;
1532 if ((wq = work_queue(work)) == NULL)
1535 flush_workqueue(wq);
1549 struct workqueue_struct *wq;
1553 if ((wq = work_queue(&dw->work)) == NULL)
1556 mutex_enter(&wq->wq_lock);
1557 if (__predict_false(work_queue(&dw->work) != wq)) {
1587 if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
1596 dw_callout_destroy(wq, dw);
1597 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1599 cv_broadcast(&wq->wq_cv);
1601 &dw->work, wq);
1611 mutex_exit(&wq->wq_lock);
1612 flush_workqueue(wq);
1613 mutex_enter(&wq->wq_lock);
1616 mutex_exit(&wq->wq_lock);
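The block above reads like the Linux-style flush_delayed_work(): a delayed work whose callout is still pending is moved onto the run queue immediately (dw_callout_destroy() plus the TAILQ insert) and the queue is then flushed. A one-line sketch, with the function name taken as an assumption:

    /* Make a pending example_dwork run now rather than at its timer,
       and wait for the handler to finish. */
    flush_delayed_work(&example_dwork);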
1651 struct workqueue_struct *wq = work_queue(&rw->work);
1653 mutex_enter(&wq->wq_lock);
1655 KASSERT(work_queue(&rw->work) == wq);
1657 TAILQ_REMOVE(&wq->wq_rcu, &rw->work, work_entry);
1658 TAILQ_INSERT_TAIL(&wq->wq_queue, &rw->work, work_entry);
1659 cv_broadcast(&wq->wq_cv);
1660 SDT_PROBE2(sdt, linux, work, queue, &rw->work, wq);
1661 mutex_exit(&wq->wq_lock);
1665 * queue_rcu_work(wq, rw)
1667 * Schedule rw to run on wq after an RCU grace period.
1670 queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rw)
1673 mutex_enter(&wq->wq_lock);
1674 if (acquire_work(&rw->work, wq)) {
1676 SDT_PROBE2(sdt, linux, work, rcu, rw, wq);
1677 TAILQ_INSERT_TAIL(&wq->wq_rcu, &rw->work, work_entry);
1680 mutex_exit(&wq->wq_lock);
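queue_rcu_work() parks the work on wq_rcu until the RCU callback just above moves it to the run queue, so the handler runs only after an RCU grace period. A caller-side sketch, assuming the Linux-style INIT_RCU_WORK():

    static struct rcu_work example_rwork;

    static void
    example_defer(void)
    {
            /* example_task runs on example_wq only after an RCU grace
               period, so existing RCU readers have drained first. */
            INIT_RCU_WORK(&example_rwork, example_task);
            queue_rcu_work(example_wq, &example_rwork);
    }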