Lines Matching defs:dw
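All matches below come from NetBSD's Linux workqueue compatibility code (linux_work.c), which implements the Linux delayed-work API on top of callout(9); the number opening each entry is the line of the match in that file.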
100 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/,
103 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
105 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
107 "struct delayed_work *"/*dw*/, "struct workqueue_struct *"/*wq*/);
328 struct delayed_work *const dw = TAILQ_FIRST(&wq->wq_delayed);
330 KASSERT(work_queue(&dw->work) == wq);
331 KASSERTMSG((dw->dw_state == DELAYED_WORK_SCHEDULED ||
332 dw->dw_state == DELAYED_WORK_RESCHEDULED ||
333 dw->dw_state == DELAYED_WORK_CANCELLED),
335 dw, dw->dw_state);
350 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
351 dw->dw_state = DELAYED_WORK_CANCELLED;
352 if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
353 cancel_delayed_work_done(wq, dw);
491 struct delayed_work *const dw = cookie;
492 struct workqueue_struct *const wq = work_queue(&dw->work);
496 dw, dw->dw_state, dw->dw_resched);
498 SDT_PROBE2(sdt, linux, work, timer, dw, wq);
501 KASSERT(work_queue(&dw->work) == wq);
502 switch (dw->dw_state) {
504 panic("delayed work callout uninitialized: %p", dw);
506 dw_callout_destroy(wq, dw);
507 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work, work_entry);
509 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
512 KASSERT(dw->dw_resched >= 0);
513 callout_schedule(&dw->dw_callout, dw->dw_resched);
514 dw->dw_state = DELAYED_WORK_SCHEDULED;
515 dw->dw_resched = -1;
518 cancel_delayed_work_done(wq, dw);
519 /* Can't dereference dw after this point. */
522 panic("delayed work callout in bad state: %p", dw);
524 KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
525 dw->dw_state == DELAYED_WORK_SCHEDULED);
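The timeout handler above (linux_workqueue_timeout, installed at line 920) drives a small state machine kept in dw->dw_state. The enum declaration itself is not among the matched lines, but the cases visible above imply roughly the following shape; the state names are real, while the comments are inferred from the transitions at lines 506-518 rather than taken from the source:

enum {
	DELAYED_WORK_IDLE,		/* no callout armed; behaves as
					 * plain work */
	DELAYED_WORK_SCHEDULED,		/* callout armed; on expiry, queue
					 * dw->work for a worker (line 507) */
	DELAYED_WORK_RESCHEDULED,	/* callout armed; on expiry, re-arm
					 * for dw->dw_resched ticks (513) */
	DELAYED_WORK_CANCELLED,		/* callout armed; on expiry, tear it
					 * down (line 518) */
};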
865 * INIT_DELAYED_WORK(dw, fn)
867 * Initialize dw for use with a workqueue to call fn in a worker thread.
872 INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
875 INIT_WORK(&dw->work, fn);
876 dw->dw_state = DELAYED_WORK_IDLE;
877 dw->dw_resched = -1;
888 * schedule_delayed_work(dw, ticks)
890 * If it is not currently scheduled, schedule dw to run after ticks on system_wq.
899 schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
902 return queue_delayed_work(system_wq, dw, ticks);
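Taken together with INIT_DELAYED_WORK above, client code typically looks like the following sketch. The softc, handler, and field names (my_softc, my_tick, sc_tick_dw) are hypothetical; container_of and mstohz are the stock NetBSD helpers:

struct my_softc {
	struct delayed_work	sc_tick_dw;
	unsigned		sc_nticks;
};

static void
my_tick(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work,
	    work);
	struct my_softc *sc = container_of(dw, struct my_softc, sc_tick_dw);

	sc->sc_nticks++;	/* runs in a workqueue worker thread */
}

	...
	INIT_DELAYED_WORK(&sc->sc_tick_dw, &my_tick);

	/* Run my_tick on system_wq roughly one second from now. */
	schedule_delayed_work(&sc->sc_tick_dw, mstohz(1000));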
906 * dw_callout_init(wq, dw)
908 * Initialize the callout of dw and transition to DELAYED_WORK_SCHEDULED.
912 dw_callout_init(struct workqueue_struct *wq, struct delayed_work *dw)
916 KASSERT(work_queue(&dw->work) == wq);
917 KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
919 callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
920 callout_setfunc(&dw->dw_callout, &linux_workqueue_timeout, dw);
921 TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
922 dw->dw_state = DELAYED_WORK_SCHEDULED;
926 * dw_callout_destroy(wq, dw)
928 * Destroy the callout of dw and transition to DELAYED_WORK_IDLE.
931 dw_callout_destroy(struct workqueue_struct *wq, struct delayed_work *dw)
935 KASSERT(work_queue(&dw->work) == wq);
936 KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED ||
937 dw->dw_state == DELAYED_WORK_RESCHEDULED ||
938 dw->dw_state == DELAYED_WORK_CANCELLED);
940 TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
941 callout_destroy(&dw->dw_callout);
942 dw->dw_resched = -1;
943 dw->dw_state = DELAYED_WORK_IDLE;
947 * cancel_delayed_work_done(wq, dw)
951 * Complete the cancellation of dw: destroy its callout and release dw from its workqueue. Caller must not dereference dw after this returns.
954 cancel_delayed_work_done(struct workqueue_struct *wq, struct delayed_work *dw)
958 KASSERT(work_queue(&dw->work) == wq);
959 KASSERT(dw->dw_state == DELAYED_WORK_CANCELLED);
961 dw_callout_destroy(wq, dw);
962 release_work(&dw->work, wq);
963 /* Can't dereference dw after this point. */
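cancel_delayed_work_done is the tail of a race protocol repeated at lines 351-353, 1286-1290, and 1382-1385: mark the work cancelled, try to stop the callout, and let whichever side loses the race finish the job. Schematically (a restatement of those call sites, not new code):

	dw->dw_state = DELAYED_WORK_CANCELLED;
	if (!callout_stop(&dw->dw_callout)) {
		/*
		 * Stopped before it fired: the callout will never
		 * run, so complete the cancellation here.
		 */
		cancel_delayed_work_done(wq, dw);
	}
	/*
	 * Otherwise the callout already fired; the timeout handler
	 * sees DELAYED_WORK_CANCELLED and completes the cancellation
	 * itself (line 518).
	 */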
967 * queue_delayed_work(wq, dw, ticks)
969 * If it is not currently scheduled, schedule dw to run after ticks on wq.
977 queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
983 if (__predict_true(acquire_work(&dw->work, wq))) {
988 KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
990 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
993 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
999 dw_callout_init(wq, dw);
1000 callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
1001 SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
1006 switch (dw->dw_state) {
1019 dw->dw_state = DELAYED_WORK_SCHEDULED;
1021 &dw->work, wq);
1023 dw->dw_state = DELAYED_WORK_RESCHEDULED;
1024 dw->dw_resched = MIN(INT_MAX, ticks);
1026 dw, wq, ticks);
1032 dw->dw_state);
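Lines 990-1001 show the two paths: with a zero delay the work is queued immediately and no callout is armed; otherwise the callout is scheduled for MIN(INT_MAX, ticks). A caller-side sketch, reusing the hypothetical softc above plus a hypothetical private queue my_wq:

	/*
	 * Arm a ~500 ms delay on my_wq; with ticks == 0 the work
	 * would instead be queued for immediate execution.
	 */
	if (!queue_delayed_work(my_wq, &sc->sc_tick_dw, mstohz(500))) {
		/* Already scheduled or queued; that schedule stands. */
	}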
1041 * mod_delayed_work(wq, dw, ticks)
1043 * Schedule dw to run after ticks. If scheduled or queued, reschedule it for the new delay.
1050 mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
1056 if (acquire_work(&dw->work, wq)) {
1061 KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
1067 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1070 SDT_PROBE2(sdt, linux, work, queue, &dw->work, wq);
1076 dw_callout_init(wq, dw);
1077 callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
1078 SDT_PROBE3(sdt, linux, work, schedule, dw, wq, ticks);
1083 switch (dw->dw_state) {
1089 &dw->work, wq);
1091 &dw->work, wq);
1094 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1096 dw_callout_init(wq, dw);
1097 callout_schedule(&dw->dw_callout,
1100 &dw->work, wq);
1102 dw, wq, ticks);
1113 if (callout_stop(&dw->dw_callout)) {
1123 &dw->work, wq);
1125 &dw->work, wq);
1128 dw->dw_state = DELAYED_WORK_RESCHEDULED;
1129 dw->dw_resched = MIN(INT_MAX, ticks);
1131 &dw->work, wq);
1133 dw, wq, ticks);
1144 dw_callout_destroy(wq, dw);
1146 &dw->work, work_entry);
1149 &dw->work, wq);
1151 &dw->work, wq);
1157 callout_schedule(&dw->dw_callout,
1160 &dw->work, wq);
1162 dw, wq, ticks);
1180 dw->dw_state = DELAYED_WORK_SCHEDULED;
1181 dw->dw_resched = -1;
1183 &dw->work, wq);
1185 &dw->work, wq);
1188 dw->dw_resched = ticks;
1190 &dw->work, wq);
1192 dw, wq, ticks);
1209 dw->dw_state = DELAYED_WORK_SCHEDULED;
1211 &dw->work, wq);
1214 dw->dw_state = DELAYED_WORK_RESCHEDULED;
1215 dw->dw_resched = MIN(INT_MAX, ticks);
1217 dw, wq, ticks);
1222 panic("invalid delayed work state: %d", dw->dw_state);
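Where queue_delayed_work leaves an existing schedule alone, mod_delayed_work replaces it in every state, which makes it the natural debounce primitive. A sketch with a hypothetical event handler:

/*
 * Coalesce event bursts: every event restarts the 100 ms quiet
 * period, so my_tick runs once, shortly after the burst ends.
 */
static void
my_event(struct my_softc *sc)
{

	mod_delayed_work(system_wq, &sc->sc_tick_dw, mstohz(100));
}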
1231 * cancel_delayed_work(dw)
1241 cancel_delayed_work(struct delayed_work *dw)
1247 if ((wq = work_queue(&dw->work)) == NULL)
1251 if (__predict_false(work_queue(&dw->work) != wq)) {
1254 switch (dw->dw_state) {
1260 if (work_claimed(&dw->work, wq)) {
1262 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1265 &dw->work, wq);
1266 release_work(&dw->work, wq);
1267 /* Can't dereference dw after this point. */
1286 dw->dw_state = DELAYED_WORK_CANCELLED;
1288 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1289 if (!callout_stop(&dw->dw_callout))
1290 cancel_delayed_work_done(wq, dw);
1297 dw->dw_state = DELAYED_WORK_CANCELLED;
1298 dw->dw_resched = -1;
1300 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1313 dw->dw_state);
1322 * cancel_delayed_work_sync(dw)
1331 cancel_delayed_work_sync(struct delayed_work *dw)
1337 if ((wq = work_queue(&dw->work)) == NULL)
1341 if (__predict_false(work_queue(&dw->work) != wq)) {
1344 switch (dw->dw_state) {
1350 if (work_claimed(&dw->work, wq)) {
1352 TAILQ_REMOVE(&wq->wq_dqueue, &dw->work,
1355 &dw->work, wq);
1356 release_work(&dw->work, wq);
1357 /* Can't dereference dw after this point. */
1364 if (wq->wq_current_work == &dw->work)
1365 wait_for_current_work(&dw->work, wq);
1382 dw->dw_state = DELAYED_WORK_CANCELLED;
1383 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1384 if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
1385 cancel_delayed_work_done(wq, dw);
1394 dw->dw_state = DELAYED_WORK_CANCELLED;
1395 dw->dw_resched = -1;
1396 SDT_PROBE2(sdt, linux, work, cancel, &dw->work, wq);
1397 (void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1407 (void)callout_halt(&dw->dw_callout, &wq->wq_lock);
1412 dw->dw_state);
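The two cancel variants split on waiting: cancel_delayed_work stops a pending timer or queue entry but does not wait for a handler that is already running, while cancel_delayed_work_sync additionally waits out the running handler (lines 1364-1365) and the callout itself (lines 1384, 1397, 1407). A teardown sketch, again with the hypothetical softc:

	/* Best effort: my_tick may still be running on return. */
	if (cancel_delayed_work(&sc->sc_tick_dw)) {
		/* The pending invocation was stopped before it ran. */
	}

	/*
	 * Detach path: on return, my_tick is not queued, not
	 * scheduled, and no longer running, so sc can be freed.
	 */
	(void)cancel_delayed_work_sync(&sc->sc_tick_dw);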
1540 * flush_delayed_work(dw)
1542 * If dw is scheduled to run after a delay, queue it immediately
1543 * instead. Then, if dw is queued or currently executing, wait for it to complete.
1547 flush_delayed_work(struct delayed_work *dw)
1553 if ((wq = work_queue(&dw->work)) == NULL)
1557 if (__predict_false(work_queue(&dw->work) != wq)) {
1565 switch (dw->dw_state) {
1586 dw->dw_state = DELAYED_WORK_SCHEDULED;
1587 if (!callout_halt(&dw->dw_callout, &wq->wq_lock)) {
1594 KASSERT(dw->dw_state ==
1596 dw_callout_destroy(wq, dw);
1597 TAILQ_INSERT_TAIL(&wq->wq_dqueue, &dw->work,
1601 &dw->work, wq);
1605 panic("invalid delayed work state: %d", dw->dw_state);
1622 * delayed_work_pending(dw)
1624 * True if dw is currently scheduled to execute, false if not.
1627 delayed_work_pending(const struct delayed_work *dw)
1630 return work_pending(&dw->work);
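A common pairing of the last two entries, e.g. to force pending results out before reading them. The pending test is only an optimization: flush_delayed_work alone is safe, returning early when nothing is scheduled (line 1553):

	/* If a tick is still outstanding, run it now and wait. */
	if (delayed_work_pending(&sc->sc_tick_dw))
		flush_delayed_work(&sc->sc_tick_dw);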