
Lines Matching refs:work

58 	struct dwork_head	wq_delayed;	/* delayed work scheduled */
59 	struct work_head	wq_rcu;		/* RCU work scheduled */
60 	struct work_head	wq_queue;	/* work to run */
61 	struct work_head	wq_dqueue;	/* delayed work to run now */
89 SDT_PROBE_DEFINE2(sdt, linux, work, acquire,
90 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
91 SDT_PROBE_DEFINE2(sdt, linux, work, release,
92 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
93 SDT_PROBE_DEFINE2(sdt, linux, work, queue,
94 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
95 SDT_PROBE_DEFINE2(sdt, linux, work, rcu,
96 "struct rcu_work *"/*work*/, "struct workqueue_struct *"/*wq*/);
97 SDT_PROBE_DEFINE2(sdt, linux, work, cancel,
98 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
99 SDT_PROBE_DEFINE3(sdt, linux, work, schedule,
102 SDT_PROBE_DEFINE2(sdt, linux, work, timer,
104 SDT_PROBE_DEFINE2(sdt, linux, work, wait__start,
106 SDT_PROBE_DEFINE2(sdt, linux, work, wait__done,
108 SDT_PROBE_DEFINE2(sdt, linux, work, run,
109 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
110 SDT_PROBE_DEFINE2(sdt, linux, work, done,
111 "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);
112 SDT_PROBE_DEFINE1(sdt, linux, work, batch__start,
114 SDT_PROBE_DEFINE1(sdt, linux, work, batch__done,
116 SDT_PROBE_DEFINE1(sdt, linux, work, flush__self,
118 SDT_PROBE_DEFINE1(sdt, linux, work, flush__start,
120 SDT_PROBE_DEFINE1(sdt, linux, work, flush__done,
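Each SDT_PROBE_DEFINEn line above declares a statically-defined tracing probe; the SDT_PROBEn calls that appear through the rest of this listing are the matching firing sites. For example, the queue probe declared above is fired from queue_work() once an item has been put on the run queue, roughly like this (both halves appear elsewhere in this listing):

/* Declaration, at file scope: two pointer-typed arguments.  */
SDT_PROBE_DEFINE2(sdt, linux, work, queue,
    "struct work_struct *"/*work*/, "struct workqueue_struct *"/*wq*/);

/* Firing site, inside queue_work(), after the TAILQ insertion.  */
SDT_PROBE2(sdt, linux, work, queue, work, wq);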
252 * maximum number of work items in flight, or 0 for the default.
313 * delayed work. Wait for all queued work to complete.
322 * Cancel all delayed work. We do this first because any
323 * delayed work that has already timed out, which we can't
324 * cancel, may have queued new work.
330 KASSERT(work_queue(&dw->work) == wq);
334 "delayed work %p in bad state: %d",
350 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
357 /* Wait for all scheduled RCU work to complete. */
364 * At this point, no new work can be put on the queue.
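The destroy path above is ordered deliberately: cancel delayed work first, because a callout that has already fired may still queue new work; then wait out RCU work; only then is the run queue guaranteed to stop growing and can be drained. From a caller's side the pairing looks roughly like the sketch below; the names my_wq/my_attach/my_detach and the alloc_ordered_workqueue() call are assumptions about the Linux-style entry points this file provides, not taken from this listing.

static struct workqueue_struct *my_wq;		/* hypothetical */

static int
my_attach(void)
{
	my_wq = alloc_ordered_workqueue("mywq", 0);
	return (my_wq == NULL) ? ENOMEM : 0;
}

static void
my_detach(void)
{
	/*
	 * Cancels pending delayed work, waits for RCU work, and runs
	 * or waits out everything already queued before freeing the
	 * queue, per the comments above.
	 */
	destroy_workqueue(my_wq);
	my_wq = NULL;
}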
391 * Work thread and callout
398 * there is work queued, grabs a batch of work off the queue,
407 struct work_struct marker, *work;
415 * Wait until there's activity. If there's no work and
427 * Start a batch of work. Use a marker to delimit when
431 SDT_PROBE1(sdt, linux, work, batch__start, wq);
436 while ((work = TAILQ_FIRST(q[i])) != &marker) {
439 KASSERT(work_queue(work) == wq);
440 KASSERT(work_claimed(work, wq));
442 container_of(work, struct delayed_work,
443 work)->dw_state ==
445 "delayed work %p queued and scheduled",
446 work);
448 TAILQ_REMOVE(q[i], work, work_entry);
450 wq->wq_current_work = work;
451 func = work->func;
452 release_work(work, wq);
453 /* Can't dereference work after this point. */
456 SDT_PROBE2(sdt, linux, work, run, work, wq);
457 (*func)(work);
458 SDT_PROBE2(sdt, linux, work, done, work, wq);
461 KASSERT(wq->wq_current_work == work);
468 /* Notify cancel that we've completed a batch of work. */
471 SDT_PROBE1(sdt, linux, work, batch__done, wq);
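The loop fragments above show the batching trick: the worker inserts a stack-allocated marker at the tail of the queue and runs entries until it reaches the marker, so anything queued while the batch is running waits for the next pass, and waiters (cancel, flush) can key off batch completion. Pulled out of context, with locking, the q[] pair of queues, and the delayed-work assertions elided, the pattern is roughly:

	struct work_struct marker, *work;
	void (*func)(struct work_struct *);

	/* Delimit the batch: nothing queued after this runs this pass.  */
	TAILQ_INSERT_TAIL(&wq->wq_queue, &marker, work_entry);
	while ((work = TAILQ_FIRST(&wq->wq_queue)) != &marker) {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		wq->wq_current_work = work;
		func = work->func;
		release_work(work, wq);
		/* Can't dereference work after this point.  */

		(*func)(work);	/* the real loop drops wq's lock around this */

		wq->wq_current_work = NULL;
	}
	TAILQ_REMOVE(&wq->wq_queue, &marker, work_entry);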
481 * Delayed work timeout callback.
485 * - If cancelled, destroy the callout and release the work from
492 struct workqueue_struct *const wq = work_queue(&dw->work);
495 "delayed work %p state %d resched %d",
498 SDT_PROBE2(sdt, linux, work, timer, dw, wq);
501 KASSERT(work_queue(&dw->work) == wq);
504 panic("delayed work callout uninitialized: %p", dw);
507 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
509 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
522 panic("delayed work callout in bad state: %p", dw);
532 * If in a workqueue worker thread, return the work it is
540 /* If we're not a workqueue thread, then there's no work. */
545 * Otherwise, this should be possible only while work is in
546 * progress. Return the current work item.
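current_work() is the introspection hook the comments above describe: from inside a worker thread it names the item being executed, from anywhere else it returns NULL. A plausible reconstruction from those fragments (the lwp_getspecific()/workqueue_key thread-local lookup is an assumption; only wq_current_work is visible in this listing):

struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}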
554 * Work
558 * INIT_WORK(work, fn)
560 * Initialize work for use with a workqueue to call fn in a worker
564 INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
567 work->work_owner = 0;
568 work->func = fn;
572 * work_claimed(work, wq)
574 * True if work is currently claimed by a workqueue, meaning it is
579 work_claimed(struct work_struct *work, struct workqueue_struct *wq)
582 KASSERT(work_queue(work) == wq);
585 return atomic_load_relaxed(&work->work_owner) & 1;
589 * work_pending(work)
591 * True if work is currently claimed by some workqueue, i.e. queued to run.
595 work_pending(const struct work_struct *work)
598 return atomic_load_relaxed(&work->work_owner) & 1;
602 * work_queue(work)
604 * Return the last queue that work was queued on, or NULL if it
608 work_queue(struct work_struct *work)
612 (atomic_load_relaxed(&work->work_owner) & ~(uintptr_t)1);
616 * acquire_work(work, wq)
618 * Try to claim work for wq. If work is already claimed, it must
619 * be claimed by wq; return false. If work is not already
626 acquire_work(struct work_struct *work, struct workqueue_struct *wq)
635 owner0 = atomic_load_relaxed(&work->work_owner);
641 } while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
644 KASSERT(work_queue(work) == wq);
646 SDT_PROBE2(sdt, linux, work, acquire, work, wq);
651 * release_work(work, wq)
654 * dissociate work from wq.
656 * Caller must hold wq's lock and work must be associated with wq.
659 release_work(struct work_struct *work, struct workqueue_struct *wq)
662 KASSERT(work_queue(work) == wq);
665 SDT_PROBE2(sdt, linux, work, release, work, wq);
673 atomic_store_relaxed(&work->work_owner,
674 atomic_load_relaxed(&work->work_owner) & ~(uintptr_t)1);
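work_claimed(), work_pending(), work_queue(), acquire_work() and release_work() all share one word, work_owner: the owning workqueue pointer with bit 0 doubling as the claimed/pending flag, which is why the readers test bit 0 and mask with ~(uintptr_t)1. A compact sketch of that encoding and of the compare-and-swap claim step the fragments above outline; the helper names here are illustrative, not the file's:

#include <sys/atomic.h>

/*
 * Owner word: pointer to the owning workqueue, with bit 0 doubling as
 * the claimed/pending flag.
 */
static struct workqueue_struct *
owner_to_queue(uintptr_t owner)
{
	return (struct workqueue_struct *)(owner & ~(uintptr_t)1);
}

static bool
try_acquire(struct work_struct *work, struct workqueue_struct *wq)
{
	const uintptr_t owner = (uintptr_t)wq | 1;	/* claimed by wq */
	uintptr_t owner0;

	do {
		owner0 = atomic_load_relaxed(&work->work_owner);
		if (owner0 & 1) {
			/* Already claimed -- must already belong to wq.  */
			KASSERT(owner_to_queue(owner0) == wq);
			return false;
		}
	} while (atomic_cas_uintptr(&work->work_owner, owner0, owner) !=
	    owner0);

	return true;
}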
678 * schedule_work(work)
680 * If work is not already queued on system_wq, queue it to be run
682 * newly queued, false if it was already queued. If the work was
685 * Caller must ensure work is not queued to run on a different
689 schedule_work(struct work_struct *work)
692 return queue_work(system_wq, work);
696 * queue_work(wq, work)
698 * If work is not already queued on wq, queue it to be run by wq's
700 * false if it was already queued. If the work was already
703 * Caller must ensure work is not queued to run on a different
707 queue_work(struct workqueue_struct *wq, struct work_struct *work)
714 if (__predict_true(acquire_work(work, wq))) {
717 * one, and signal the worker thread that there is work
720 TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
722 SDT_PROBE2(sdt, linux, work, queue, work, wq);
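Putting INIT_WORK() and schedule_work()/queue_work() together, a hedged caller-side sketch (the handler and the my_* names are invented; the API usage mirrors the comments above):

static struct work_struct my_work;		/* hypothetical */

static void
my_handler(struct work_struct *work)
{
	/* Runs later, in the workqueue's worker thread.  */
}

static void
my_init(void)
{
	INIT_WORK(&my_work, my_handler);
}

static void
my_event(void)
{
	/*
	 * Returns true if newly queued, false if it was already
	 * pending; either way the handler runs once for this burst.
	 */
	(void)schedule_work(&my_work);
}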
737 * cancel_work(work)
739 * If work was queued, remove it from the queue and return true.
740 * If work was not queued, return false. Work may still be
744 cancel_work(struct work_struct *work)
750 if ((wq = work_queue(work)) == NULL)
754 if (__predict_false(work_queue(work) != wq)) {
763 if (work_claimed(work, wq)) {
768 TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
769 SDT_PROBE2(sdt, linux, work, cancel, work, wq);
770 release_work(work, wq);
771 /* Can't dereference work after this point. */
784 * cancel_work_sync(work)
786 * If work was queued, remove it from the queue and return true.
787 * If work was not queued, return false. Either way, if work is
793 cancel_work_sync(struct work_struct *work)
799 if ((wq = work_queue(work)) == NULL)
803 if (__predict_false(work_queue(work) != wq)) {
812 if (work_claimed(work, wq)) {
817 TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
818 SDT_PROBE2(sdt, linux, work, cancel, work, wq);
819 release_work(work, wq);
820 /* Can't dereference work after this point. */
827 if (wq->wq_current_work == work)
828 wait_for_current_work(work, wq);
836 * wait_for_current_work(work, wq)
838 * wq must be currently executing work. Wait for it to finish.
840 * Does not dereference work.
843 wait_for_current_work(struct work_struct *work, struct workqueue_struct *wq)
848 KASSERT(wq->wq_current_work == work);
851 SDT_PROBE2(sdt, linux, work, wait__start, work, wq);
855 } while (wq->wq_current_work == work && wq->wq_gen == gen);
856 SDT_PROBE2(sdt, linux, work, wait__done, work, wq);
861 * Delayed work
875 INIT_WORK(&dw->work, fn);
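Everything below is driven by a per-delayed-work state machine: a callout, the dw_state field that the panics report, and dw_resched recording a pending re-arm. The type itself is not shown in this listing, so the following is a hedged sketch of roughly what it must look like; the member layout and enumerator names are assumptions chosen to be consistent with the transitions below:

struct delayed_work {
	struct callout	dw_callout;		/* timer that queues the work */
	volatile enum {
		DELAYED_WORK_IDLE,		/* not scheduled */
		DELAYED_WORK_SCHEDULED,		/* callout armed */
		DELAYED_WORK_RESCHEDULED,	/* callout firing; will re-arm */
		DELAYED_WORK_CANCELLED,		/* callout firing; will drop */
	}		dw_state;
	int		dw_resched;		/* delay for the pending re-arm */
	struct work_struct work;		/* embedded plain work item */
};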
916 KASSERT(work_queue(&dw->work) == wq);
935 KASSERT(work_queue(&dw->work) == wq);
949 * Complete cancellation of a delayed work: transition from
958 KASSERT(work_queue(&dw->work) == wq);
962 release_work(&dw->work, wq);
983 if (__predict_true(acquire_work(&dw->work, wq))) {
990 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
993 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
1001 SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
1020 SDT_PROBE2(sdt, linux, work, queue,
1021 &dw->work, wq);
1025 SDT_PROBE3(sdt, linux, work, schedule,
1031 panic("invalid delayed work state: %d",
1046 * True if it modified the timer of an already scheduled work,
1047 * false if it newly scheduled the work.
1056 if (acquire_work(&dw->work, wq)) {
1067 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1070 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
1078 SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
1088 SDT_PROBE2(sdt, linux, work, cancel,
1089 &dw->work, wq);
1090 SDT_PROBE2(sdt, linux, work, queue,
1091 &dw->work, wq);
1094 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1099 SDT_PROBE2(sdt, linux, work, cancel,
1100 &dw->work, wq);
1101 SDT_PROBE3(sdt, linux, work, schedule,
1122 SDT_PROBE2(sdt, linux, work, cancel,
1123 &dw->work, wq);
1124 SDT_PROBE2(sdt, linux, work, queue,
1125 &dw->work, wq);
1130 SDT_PROBE2(sdt, linux, work, cancel,
1131 &dw->work, wq);
1132 SDT_PROBE3(sdt, linux, work, schedule,
1146 &dw->work, work_entry);
1148 SDT_PROBE2(sdt, linux, work, cancel,
1149 &dw->work, wq);
1150 SDT_PROBE2(sdt, linux, work, queue,
1151 &dw->work, wq);
1159 SDT_PROBE2(sdt, linux, work, cancel,
1160 &dw->work, wq);
1161 SDT_PROBE3(sdt, linux, work, schedule,
1177 * callout will queue the work as soon
1182 SDT_PROBE2(sdt, linux, work, cancel,
1183 &dw->work, wq);
1184 SDT_PROBE2(sdt, linux, work, queue,
1185 &dw->work, wq);
1189 SDT_PROBE2(sdt, linux, work, cancel,
1190 &dw->work, wq);
1191 SDT_PROBE3(sdt, linux, work, schedule,
1206 * callout will queue the work as soon
1210 SDT_PROBE2(sdt, linux, work, queue,
1211 &dw->work, wq);
1216 SDT_PROBE3(sdt, linux, work, schedule,
1222 panic("invalid delayed work state: %d", dw->dw_state);
1233 * If work was scheduled or queued, remove it from the schedule or
1234 * queue and return true. If work was not scheduled or queued,
1235 * return false. Note that work may already be running; if it
1238 * wait for the work to complete.
1247 if ((wq = work_queue(&dw->work)) == NULL)
1251 if (__predict_false(work_queue(&dw->work) != wq)) {
1260 if (work_claimed(&dw->work, wq)) {
1262 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1264 SDT_PROBE2(sdt, linux, work, cancel,
1265 &dw->work, wq);
1266 release_work(&dw->work, wq);
1288 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1300 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1312 panic("invalid delayed work state: %d",
1324 * If work was scheduled or queued, remove it from the schedule or
1325 * queue and return true. If work was not scheduled or queued,
1326 * return false. Note that work may already be running; if it
1337 if ((wq = work_queue(&dw->work)) == NULL)
1341 if (__predict_false(work_queue(&dw->work) != wq)) {
1350 if (work_claimed(&dw->work, wq)) {
1352 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1354 SDT_PROBE2(sdt, linux, work, cancel,
1355 &dw->work, wq);
1356 release_work(&dw->work, wq);
1364 if (wq->wq_current_work == &dw->work)
1365 wait_for_current_work(&dw->work, wq);
1383 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1396 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1411 panic("invalid delayed work state: %d",
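A hedged caller-side sketch tying the delayed-work entry points together (the my_* names are invented, and mstohz() is only an assumption about how a caller would pick the tick count; the API usage follows the comments above):

static struct delayed_work my_dwork;		/* hypothetical */

static void
my_tick(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work,
	    work);

	/* Periodic poll: re-arm for roughly one second from now.  */
	(void)schedule_delayed_work(dw, mstohz(1000));
}

static void
my_start(void)
{
	INIT_DELAYED_WORK(&my_dwork, my_tick);
	(void)schedule_delayed_work(&my_dwork, mstohz(1000));
}

static void
my_stop(void)
{
	/*
	 * Returns only once neither the callout nor my_tick() can
	 * still be running.
	 */
	(void)cancel_delayed_work_sync(&my_dwork);
}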
1428 * Wait for all work queued on system_wq to complete. This does
1429 * not include delayed work.
1446 flush_work_cb(struct work_struct *work)
1448 struct flush_work *fw = container_of(work, struct flush_work, fw_work);
1459 * Wait for all work queued on wq to complete. This does not
1460 * include delayed work.
1468 SDT_PROBE1(sdt, linux, work, flush__self, wq);
1477 SDT_PROBE1(sdt, linux, work, flush__start, wq);
1484 SDT_PROBE1(sdt, linux, work, flush__done, wq);
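flush_workqueue() (and flush_scheduled_work(), which flushes system_wq) works by queueing a sentinel work item whose callback, flush_work_cb() above, marks it done and wakes the flusher: because the queue is FIFO, once the sentinel has run everything queued before it has run too. A hedged sketch of that pattern, assuming a struct flush_work that pairs the sentinel with a done flag (only the fw_work member is visible in this listing; the lock, condvar, and flag names are invented, and their initialization is elided):

struct flush_work {
	kmutex_t		fw_lock;	/* assumed */
	kcondvar_t		fw_cv;		/* assumed */
	struct work_struct	fw_work;	/* the sentinel itself */
	bool			fw_done;	/* assumed */
};

static void
flush_work_cb(struct work_struct *work)
{
	struct flush_work *fw = container_of(work, struct flush_work, fw_work);

	/* The sentinel ran, so everything queued before it ran too.  */
	mutex_enter(&fw->fw_lock);
	fw->fw_done = true;
	cv_broadcast(&fw->fw_cv);
	mutex_exit(&fw->fw_lock);
}

	/* Flusher side: queue the sentinel, then wait for it.  */
	INIT_WORK(&fw.fw_work, flush_work_cb);
	queue_work(wq, &fw.fw_work);
	mutex_enter(&fw.fw_lock);
	while (!fw.fw_done)
		cv_wait(&fw.fw_cv, &fw.fw_lock);
	mutex_exit(&fw.fw_lock);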
1495 * Repeatedly flush wq until there is no more work.
1518 * flush_work(work)
1520 * If work is queued or currently executing, wait for it to
1527 flush_work(struct work_struct *work)
1532 if ((wq = work_queue(work)) == NULL)
1553 if ((wq = work_queue(&dw->work)) == NULL)
1557 if (__predict_false(work_queue(&dw->work) != wq)) {
1580 * the callout has fired it will queue the work
1582 * can, queue the work now; if we can't, wait
1597 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1600 SDT_PROBE2(sdt, linux, work, queue,
1601 &dw->work, wq);
1605 panic("invalid delayed work state: %d", dw->dw_state);
1630 return work_pending(&dw->work);
1644 INIT_WORK(&rw->work, fn);
1651 struct workqueue_struct *wq = work_queue(&rw->work);
1654 KASSERT(work_pending(&rw->work));
1655 KASSERT(work_queue(&rw->work) == wq);
1657 TAILQ_REMOVE(&wq->wq_rcu, &rw->work, work_entry);
1658 TAILQ_INSERT_TAIL(&wq->wq_queue, &rw->work, work_entry);
1660 SDT_PROBE2(sdt, linux, work, queue, &rw->work, wq);
1674 if (acquire_work(&rw->work, wq)) {
1676 SDT_PROBE2(sdt, linux, work, rcu, rw, wq);
1677 TAILQ_INSERT_TAIL(&wq->wq_rcu, &rw->work, work_entry);
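queue_rcu_work() claims the work and parks it on wq_rcu; the callback around line 1651 later moves it to wq_queue once an RCU grace period has elapsed, so the work function runs only after all pre-existing RCU readers are done, and it runs in thread context where it may sleep. A hedged usage sketch (the my_* names are invented; the INIT_RCU_WORK()/queue_rcu_work() calls are assumed to follow the Linux API this file implements):

struct my_object {
	struct rcu_work		mo_rwork;
	/* ... data visible to RCU readers ... */
};

static void
my_reclaim(struct work_struct *work)
{
	struct rcu_work *rw = container_of(work, struct rcu_work, work);
	struct my_object *mo = container_of(rw, struct my_object, mo_rwork);

	/*
	 * A grace period has elapsed, so no reader still holds a
	 * reference; unlike a bare RCU callback this runs in a worker
	 * thread and may sleep.
	 */
	kmem_free(mo, sizeof(*mo));
}

static void
my_retire(struct my_object *mo)
{
	INIT_RCU_WORK(&mo->mo_rwork, my_reclaim);
	queue_rcu_work(system_wq, &mo->mo_rwork);
}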