/*	$NetBSD: linux_work.c,v 1.16 2018/08/27 14:58:57 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.16 2018/08/27 14:58:57 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

struct workqueue_struct {
	kmutex_t wq_lock;
	kcondvar_t wq_cv;
	TAILQ_HEAD(, delayed_work) wq_delayed;
	TAILQ_HEAD(, work_struct) wq_queue;
	struct work_struct *wq_current_work;
	int wq_flags;
	struct lwp *wq_lwp;
	uint64_t wq_gen;
	bool wq_requeued:1;
	bool wq_dying:1;
};

static void __dead linux_workqueue_thread(void *);
static void linux_workqueue_timeout(void *);
static void queue_delayed_work_anew(struct workqueue_struct *,
    struct delayed_work *, unsigned long);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_power_efficient_wq __read_mostly;

int
linux_workqueue_init(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

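/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue with a single worker thread named name,
 *	which runs queued work items one at a time in order.  No flags
 *	are currently supported.  Returns NULL on failure.
 */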
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	wq->wq_current_work = NULL;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

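/*
 * destroy_workqueue(wq)
 *
 *	Cancel any remaining delayed work on wq, tell the worker
 *	thread to exit and wait for it, then free wq.
 */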
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	for (;;) {
		struct delayed_work *dw = NULL;

		mutex_enter(&wq->wq_lock);
		if (!TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;
		cancel_delayed_work_sync(dw);
	}

	/* Tell the thread to exit. */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit. */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

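/*
 * linux_workqueue_thread(cookie)
 *
 *	Main loop of the worker thread for the workqueue passed as
 *	cookie.  Waits for work, runs each batch of queued work items,
 *	and bumps the generation number so flush_workqueue can tell
 *	when a batch has completed.  Exits when wq_dying is set.
 */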
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	TAILQ_HEAD(, work_struct) tmp;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/* Wait until there's activity.  If we're dying, stop. */
		while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		if (wq->wq_dying)
			break;

		/* Grab a batch of work off the queue. */
		KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
		TAILQ_INIT(&tmp);
		TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

		/* Process each work item in the batch. */
		while (!TAILQ_EMPTY(&tmp)) {
			struct work_struct *const work = TAILQ_FIRST(&tmp);

			TAILQ_REMOVE(&tmp, work, work_entry);
			KASSERT(wq->wq_current_work == NULL);
			wq->wq_current_work = work;

			mutex_exit(&wq->wq_lock);
			(*work->func)(work);
			mutex_enter(&wq->wq_lock);

			KASSERT(wq->wq_current_work == work);
			KASSERT(work->work_queue == wq);
			if (wq->wq_requeued)
				wq->wq_requeued = false;
			else
				work->work_queue = NULL;
			wq->wq_current_work = NULL;
			cv_broadcast(&wq->wq_cv);
		}

		/* Notify flush that we've completed a batch of work. */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

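/*
 * linux_workqueue_timeout(cookie)
 *
 *	Callout handler for the delayed work passed as cookie.  If the
 *	work is still scheduled, move it onto the run queue; if it was
 *	rescheduled while the callout fired, leave it scheduled; if it
 *	was cancelled, tear the callout down.
 */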
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = dw->work.work_queue;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		break;
	case DELAYED_WORK_RESCHEDULED:
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		break;
	case DELAYED_WORK_CANCELLED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		break;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
	mutex_exit(&wq->wq_lock);
}

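/*
 * current_work()
 *
 *	If the calling LWP is a workqueue worker thread, return the
 *	work item it is currently running; otherwise return NULL.
 */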
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work. */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_queue = NULL;
	work->func = fn;
}

bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

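/*
 * queue_work(wq, work)
 *
 *	If work is not already queued, put it on wq's queue for the
 *	worker thread to run and return true.  If it is already queued
 *	(necessarily on wq), return false.
 */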
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq))
		    == NULL)) {
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

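/*
 * cancel_work(work)
 *
 *	If work is queued but has not started running, remove it from
 *	its queue and return true.  Otherwise return false.  Does not
 *	wait for the work to complete if it is already running.
 */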
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel. */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

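/*
 * cancel_work_sync(work)
 *
 *	Like cancel_work, but if work is currently running, also wait
 *	for it to complete before returning.  Return true iff work was
 *	removed from the queue before it ran.
 */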
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel. */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (wq->wq_current_work == work);
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * Delayed work
 */

void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;

	/*
	 * Defer callout_init until we are about to schedule the
	 * callout, which can then callout_destroy it: there is no
	 * DESTROY_DELAYED_WORK or similar, so that is our only
	 * opportunity to call callout_destroy.
	 */
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}

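/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *	Common code to (re)schedule dw on wq, with the workqueue lock
 *	held and dw->work.work_queue already set to wq.  If ticks is
 *	zero, put the work directly on the run queue; otherwise arm
 *	the callout to fire after ticks, initializing it first if it
 *	has not been scheduled yet.
 */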
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
	    (dw->dw_state == DELAYED_WORK_SCHEDULED));

	if (ticks == 0) {
		if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		}
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		dw->dw_state = DELAYED_WORK_IDLE;
	} else {
		if (dw->dw_state == DELAYED_WORK_IDLE) {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
			    &linux_workqueue_timeout, dw);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
			/*
			 * Re-arm the callout: the caller may have
			 * stopped it (mod_delayed_work does) and
			 * expects it to fire after ticks.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
		}
		dw->dw_state = DELAYED_WORK_SCHEDULED;
	}
}

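/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	If dw is not already queued, schedule it to run on wq after
 *	ticks and return true.  If it is already queued (necessarily
 *	on wq), leave its timer alone and return false.
 */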
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = atomic_cas_ptr(&dw->work.work_queue, NULL,
		    wq)) == NULL)) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

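/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks, whether or not it is
 *	already queued or its timer is already armed.  Return true if
 *	an existing timer was modified, false if dw was newly queued
 *	or had already begun running.
 */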
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if ((wq0 = atomic_cas_ptr(&dw->work.work_queue, NULL, wq)) == NULL) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		timer_modified = false;
	} else {
		KASSERT(wq0 == wq);
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				/* Work is queued, but hasn't started yet. */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = true;
			} else {
				/*
				 * Too late.  Queue it anew.  If that
				 * would skip the callout because it's
				 * immediate, notify the workqueue.
				 */
				wq->wq_requeued = ticks == 0;
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Reschedule it and tell it
				 * we've done so.
				 */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
			} else {
				/* Stopped it.  Queue it anew. */
				queue_delayed_work_anew(wq, dw, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone modified the timer _again_, or
			 * cancelled it, after the callout started but
			 * before the poor thing even had a chance to
			 * acquire the lock.  Just reschedule it once
			 * more.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			dw->dw_state = DELAYED_WORK_RESCHEDULED;
			timer_modified = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

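/*
 * cancel_delayed_work(dw)
 *
 *	Cancel dw if it is still on the timer or on the queue; return
 *	true in that case, or false if dw was not queued or had
 *	already started running.  Does not wait for dw's callout or
 *	work function to complete.
 */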
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel. */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running. */
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it. */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Tell it to give up.
				 */
				dw->dw_state = DELAYED_WORK_CANCELLED;
			} else {
				/* Stopped it.  Kill it. */
				callout_destroy(&dw->dw_callout);
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
				dw->dw_state = DELAYED_WORK_IDLE;
			}
			cancelled_p = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

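/*
 * cancel_delayed_work_sync(dw)
 *
 *	Like cancel_delayed_work, but wait for any pending callout to
 *	finish and, if the work function is already running, wait for
 *	it to complete before returning.
 */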
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel. */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  Wait. */
				do {
					cv_wait(&wq->wq_cv, &wq->wq_lock);
				} while (wq->wq_current_work == &dw->work);
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it. */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * If it has started, tell it to stop, and wait
			 * for it to complete.  We drop the lock, so by
			 * the time the callout has completed, we must
			 * review the state again.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * Flush
 */

void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

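/*
 * flush_workqueue(wq)
 *
 *	Block until wq's worker thread has completed the batch of work
 *	it is processing, i.e. until the generation number advances.
 */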
void
flush_workqueue(struct workqueue_struct *wq)
{
	uint64_t gen;

	mutex_enter(&wq->wq_lock);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (gen == wq->wq_gen);
	mutex_exit(&wq->wq_lock);
}

bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush. */
	if ((wq = work->work_queue) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}

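/*
 * flush_delayed_work(dw)
 *
 *	If dw is not queued anywhere, return false.  Otherwise halt
 *	its callout if one is pending, drop it from the run queue if
 *	it has not started, or wait for the current batch of work to
 *	complete if it is running; then return true.
 */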
bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool do_flush = false;

	/* If there's no workqueue, nothing to flush. */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		do_flush = true;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
			} else {
				do_flush = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	if (do_flush)
		flush_workqueue(wq);

	return true;
}