/*	$NetBSD: linux_work.c,v 1.15 2018/08/27 14:58:24 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.15 2018/08/27 14:58:24 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <linux/workqueue.h>

struct workqueue_struct {
	kmutex_t			wq_lock;
	kcondvar_t			wq_cv;
	TAILQ_HEAD(, delayed_work)	wq_delayed;
	TAILQ_HEAD(, work_struct)	wq_queue;
	struct work_struct		*wq_current_work;
	int				wq_flags;
	struct lwp			*wq_lwp;
	uint64_t			wq_gen;
	bool				wq_requeued:1;
	bool				wq_dying:1;
};

static void __dead	linux_workqueue_thread(void *);
static void		linux_workqueue_timeout(void *);
static void		queue_delayed_work_anew(struct workqueue_struct *,
			    struct delayed_work *, unsigned long);

static specificdata_key_t workqueue_key __read_mostly;

struct workqueue_struct	*system_wq __read_mostly;
struct workqueue_struct	*system_long_wq __read_mostly;
struct workqueue_struct	*system_power_efficient_wq __read_mostly;

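/*
 * linux_workqueue_init()
 *
 *	Initialize the Linux workqueue compatibility layer: create the
 *	lwp-specific key used by current_work() and the three global
 *	system workqueues.  Returns 0 on success or an error code on
 *	failure.
 */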
int
linux_workqueue_init(void)
{
	int error;

	error = lwp_specific_key_create(&workqueue_key, NULL);
	if (error)
		goto fail0;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL) {
		error = ENOMEM;
		goto fail2;
	}

	system_power_efficient_wq = alloc_ordered_workqueue("lnxpwrwq", 0);
	if (system_power_efficient_wq == NULL) {
		error = ENOMEM;
		goto fail3;
	}

	return 0;

fail4: __unused
	destroy_workqueue(system_power_efficient_wq);
fail3:	destroy_workqueue(system_long_wq);
fail2:	destroy_workqueue(system_wq);
fail1:	lwp_specific_key_delete(workqueue_key);
fail0:	KASSERT(error);
	return error;
}

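/*
 * linux_workqueue_fini()
 *
 *	Tear down the Linux workqueue compatibility layer: destroy the
 *	system workqueues created by linux_workqueue_init() and delete
 *	the lwp-specific key.
 */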
void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_power_efficient_wq);
	destroy_workqueue(system_long_wq);
	destroy_workqueue(system_wq);
	lwp_specific_key_delete(workqueue_key);
}

/*
 * Workqueues
 */

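/*
 * alloc_ordered_workqueue(name, flags)
 *
 *	Create a workqueue whose work items are run, one at a time and
 *	in order, by a dedicated kernel thread named after name.  flags
 *	must be zero.  Returns NULL if the worker thread cannot be
 *	created.
 */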
struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int flags)
{
	struct workqueue_struct *wq;
	int error;

	KASSERT(flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	TAILQ_INIT(&wq->wq_queue);
	wq->wq_current_work = NULL;
	wq->wq_flags = 0;
	wq->wq_gen = 0;
	wq->wq_requeued = false;
	wq->wq_dying = false;	/* kmem_alloc does not zero the memory */

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL,
	    &linux_workqueue_thread, wq, &wq->wq_lwp, "%s", name);
	if (error)
		goto fail0;

	return wq;

fail0:	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);
	kmem_free(wq, sizeof(*wq));
	return NULL;
}

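/*
 * destroy_workqueue(wq)
 *
 *	Cancel all delayed work scheduled on wq, wait for any work
 *	already in flight to complete, tell the worker thread to exit,
 *	join it, and free wq.  The caller must guarantee that no new
 *	work is queued once this is called.
 */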
void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.  We do this first because any
	 * delayed work that has already timed out, which we can't
	 * cancel, may have queued new work.
	 */
	for (;;) {
		struct delayed_work *dw = NULL;

		mutex_enter(&wq->wq_lock);
		if (!TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			if (!callout_halt(&dw->dw_callout, &wq->wq_lock))
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;
		cancel_delayed_work_sync(dw);
	}

	/* Tell the thread to exit.  */
	mutex_enter(&wq->wq_lock);
	wq->wq_dying = true;
	cv_broadcast(&wq->wq_cv);
	mutex_exit(&wq->wq_lock);

	/* Wait for it to exit.  */
	(void)kthread_join(wq->wq_lwp);

	KASSERT(wq->wq_current_work == NULL);
	KASSERT(TAILQ_EMPTY(&wq->wq_queue));
	KASSERT(TAILQ_EMPTY(&wq->wq_delayed));
	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}

/*
 * Work thread and callout
 */

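/*
 * linux_workqueue_thread(cookie)
 *
 *	Worker thread for a workqueue.  Records the workqueue in
 *	lwp-specific storage so current_work() can find it, then
 *	repeatedly takes whatever is on the queue as a batch and runs
 *	each work item with the lock dropped.  After each batch it
 *	advances the generation number so flush_workqueue() can tell
 *	that the batch has completed.
 */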
static void __dead
linux_workqueue_thread(void *cookie)
{
	struct workqueue_struct *const wq = cookie;
	TAILQ_HEAD(, work_struct) tmp;

	lwp_setspecific(workqueue_key, wq);

	mutex_enter(&wq->wq_lock);
	for (;;) {
		/* Wait until there's activity.  If we're dying, stop.  */
		while (TAILQ_EMPTY(&wq->wq_queue) && !wq->wq_dying)
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		if (wq->wq_dying)
			break;

		/* Grab a batch of work off the queue.  */
		KASSERT(!TAILQ_EMPTY(&wq->wq_queue));
		TAILQ_INIT(&tmp);
		TAILQ_CONCAT(&tmp, &wq->wq_queue, work_entry);

		/* Process each work item in the batch.  */
		while (!TAILQ_EMPTY(&tmp)) {
			struct work_struct *const work = TAILQ_FIRST(&tmp);

			TAILQ_REMOVE(&tmp, work, work_entry);
			KASSERT(wq->wq_current_work == NULL);
			wq->wq_current_work = work;

			mutex_exit(&wq->wq_lock);
			(*work->func)(work);
			mutex_enter(&wq->wq_lock);

			KASSERT(wq->wq_current_work == work);
			KASSERT(work->work_queue == wq);
			if (wq->wq_requeued)
				wq->wq_requeued = false;
			else
				work->work_queue = NULL;
			wq->wq_current_work = NULL;
			cv_broadcast(&wq->wq_cv);
		}

		/* Notify flush that we've completed a batch of work.  */
		wq->wq_gen++;
		cv_broadcast(&wq->wq_cv);
	}
	mutex_exit(&wq->wq_lock);

	kthread_exit(0);
}

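/*
 * linux_workqueue_timeout(cookie)
 *
 *	Callout handler for delayed work.  Depending on the state of
 *	the delayed work item, either moves it onto the run queue and
 *	wakes the worker, notes that a rescheduled timer is now the one
 *	in flight, or tears the callout down because the item was
 *	cancelled while the callout was firing.
 */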
static void
linux_workqueue_timeout(void *cookie)
{
	struct delayed_work *const dw = cookie;
	struct workqueue_struct *const wq = dw->work.work_queue;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	switch (dw->dw_state) {
	case DELAYED_WORK_IDLE:
		panic("delayed work callout uninitialized: %p", dw);
	case DELAYED_WORK_SCHEDULED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		cv_broadcast(&wq->wq_cv);
		break;
	case DELAYED_WORK_RESCHEDULED:
		dw->dw_state = DELAYED_WORK_SCHEDULED;
		break;
	case DELAYED_WORK_CANCELLED:
		dw->dw_state = DELAYED_WORK_IDLE;
		callout_destroy(&dw->dw_callout);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		break;
	default:
		panic("delayed work callout in bad state: %p", dw);
	}
	KASSERT(dw->dw_state == DELAYED_WORK_IDLE ||
	    dw->dw_state == DELAYED_WORK_SCHEDULED);
	mutex_exit(&wq->wq_lock);
}

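/*
 * current_work()
 *
 *	If the calling thread is a workqueue worker, return the work
 *	item it is currently running; otherwise return NULL.
 */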
struct work_struct *
current_work(void)
{
	struct workqueue_struct *wq = lwp_getspecific(workqueue_key);

	/* If we're not a workqueue thread, then there's no work.  */
	if (wq == NULL)
		return NULL;

	/*
	 * Otherwise, this should be possible only while work is in
	 * progress.  Return the current work item.
	 */
	KASSERT(wq->wq_current_work != NULL);
	return wq->wq_current_work;
}

/*
 * Work
 */

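/*
 * A minimal usage sketch of the work_struct API implemented below.
 * The mydrv_* names are hypothetical and only for illustration:
 *
 *	static struct work_struct mydrv_work;
 *
 *	static void
 *	mydrv_task(struct work_struct *work)
 *	{
 *		...do the deferred work...
 *	}
 *
 *	INIT_WORK(&mydrv_work, mydrv_task);
 *	queue_work(system_wq, &mydrv_work);
 *	...
 *	cancel_work_sync(&mydrv_work);
 */
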
void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	work->work_queue = NULL;
	work->func = fn;
}

bool
schedule_work(struct work_struct *work)
{

	return queue_work(system_wq, work);
}

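/*
 * queue_work(wq, work)
 *
 *	If work is not already queued on wq, put it on wq's queue and
 *	wake the worker.  Returns true if it was newly queued, false if
 *	it was already on wq.  It is an error for work to be associated
 *	with any other workqueue.
 */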
bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	KASSERT(wq != NULL);

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = atomic_cas_ptr(&work->work_queue, NULL, wq))
		== NULL)) {
		TAILQ_INSERT_TAIL(&wq->wq_queue, work, work_entry);
		/* Wake the worker, which sleeps while the queue is empty.  */
		cv_broadcast(&wq->wq_cv);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

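/*
 * cancel_work(work)
 *
 *	If work is queued but has not yet begun to run, remove it from
 *	its queue and return true.  Return false if it is not queued or
 *	is already running; do not wait for it.
 */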
bool
cancel_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

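/*
 * cancel_work_sync(work)
 *
 *	Like cancel_work(), but if the work is currently running, wait
 *	for it to complete before returning.  Returns true only if the
 *	work was removed from the queue before it ran.
 */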
bool
cancel_work_sync(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool cancelled_p = false;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = work->work_queue) == NULL)
		goto out;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(work->work_queue != wq)) {
		cancelled_p = false;
	} else if (wq->wq_current_work == work) {
		do {
			cv_wait(&wq->wq_cv, &wq->wq_lock);
		} while (wq->wq_current_work == work);
		cancelled_p = false;
	} else {
		TAILQ_REMOVE(&wq->wq_queue, work, work_entry);
		cancelled_p = true;
	}
	mutex_exit(&wq->wq_lock);

out:	return cancelled_p;
}

/*
 * Delayed work
 */

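/*
 * A minimal usage sketch of the delayed work API implemented below.
 * The mydrv_* names and the one-second delay are hypothetical:
 *
 *	static struct delayed_work mydrv_tick;
 *
 *	INIT_DELAYED_WORK(&mydrv_tick, mydrv_tick_task);
 *	schedule_delayed_work(&mydrv_tick, hz);
 *	mod_delayed_work(system_wq, &mydrv_tick, 2*hz);
 *	...
 *	cancel_delayed_work_sync(&mydrv_tick);
 */
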
void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{

	INIT_WORK(&dw->work, fn);
	dw->dw_state = DELAYED_WORK_IDLE;

	/*
	 * Defer callout_init until we actually schedule the callout,
	 * so that whatever cancels or runs it can callout_destroy it:
	 * there is no DESTROY_DELAYED_WORK, so we get no other
	 * opportunity to call callout_destroy.
	 */
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{

	return queue_delayed_work(system_wq, dw, ticks);
}

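/*
 * queue_delayed_work_anew(wq, dw, ticks)
 *
 *	Internal helper.  The caller holds wq's lock and has already
 *	associated dw with wq.  If ticks is zero, put dw straight onto
 *	the run queue and wake the worker; otherwise arm dw's callout to
 *	fire after ticks (clamped to INT_MAX) and record it on the list
 *	of delayed work.
 */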
static void
queue_delayed_work_anew(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{

	KASSERT(mutex_owned(&wq->wq_lock));
	KASSERT(dw->work.work_queue == wq);
	KASSERT((dw->dw_state == DELAYED_WORK_IDLE) ||
	    (dw->dw_state == DELAYED_WORK_SCHEDULED));

	if (ticks == 0) {
		if (dw->dw_state == DELAYED_WORK_SCHEDULED) {
			callout_destroy(&dw->dw_callout);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		}
		TAILQ_INSERT_TAIL(&wq->wq_queue, &dw->work, work_entry);
		dw->dw_state = DELAYED_WORK_IDLE;
		/* Wake the worker, which sleeps while the queue is empty.  */
		cv_broadcast(&wq->wq_cv);
	} else {
		if (dw->dw_state == DELAYED_WORK_IDLE) {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, MIN(INT_MAX, ticks),
			    &linux_workqueue_timeout, dw);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
		} else {
			KASSERT(dw->dw_state == DELAYED_WORK_SCHEDULED);
		}
		dw->dw_state = DELAYED_WORK_SCHEDULED;
	}
}

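/*
 * queue_delayed_work(wq, dw, ticks)
 *
 *	If dw is not already associated with a workqueue, schedule it to
 *	run on wq after ticks have elapsed (immediately if ticks is
 *	zero) and return true.  Return false if it is already scheduled
 *	or queued on wq; it is an error for dw to belong to another
 *	workqueue.
 */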
bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool newly_queued;

	mutex_enter(&wq->wq_lock);
	if (__predict_true((wq0 = atomic_cas_ptr(&dw->work.work_queue, NULL,
			wq)) == NULL)) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		newly_queued = true;
	} else {
		KASSERT(wq0 == wq);
		newly_queued = false;
	}
	mutex_exit(&wq->wq_lock);

	return newly_queued;
}

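/*
 * mod_delayed_work(wq, dw, ticks)
 *
 *	Schedule dw to run on wq after ticks, modifying its timer if it
 *	is already scheduled.  Returns true if an existing timer or a
 *	queued-but-not-yet-started work item was modified, false if dw
 *	was newly scheduled or had already started to run.
 */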
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	struct workqueue_struct *wq0;
	bool timer_modified;

	mutex_enter(&wq->wq_lock);
	if ((wq0 = atomic_cas_ptr(&dw->work.work_queue, NULL, wq)) == NULL) {
		KASSERT(dw->dw_state == DELAYED_WORK_IDLE);
		queue_delayed_work_anew(wq, dw, ticks);
		timer_modified = false;
	} else {
		KASSERT(wq0 == wq);
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				/* Work is queued, but hasn't started yet.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = true;
			} else {
				/*
				 * Too late.  Queue it anew.  If that
				 * would skip the callout because it's
				 * immediate, notify the workqueue.
				 */
				wq->wq_requeued = ticks == 0;
				queue_delayed_work_anew(wq, dw, ticks);
				timer_modified = false;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Reschedule it and tell it
				 * we've done so.
				 */
				dw->dw_state = DELAYED_WORK_RESCHEDULED;
				callout_schedule(&dw->dw_callout,
				    MIN(INT_MAX, ticks));
			} else {
				/* Stopped it.  Queue it anew.  */
				queue_delayed_work_anew(wq, dw, ticks);
			}
			timer_modified = true;
			break;
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * Someone modified the timer _again_, or
			 * cancelled it, after the callout started but
			 * before the poor thing even had a chance to
			 * acquire the lock.  Just reschedule it once
			 * more.
			 */
			callout_schedule(&dw->dw_callout, MIN(INT_MAX, ticks));
			dw->dw_state = DELAYED_WORK_RESCHEDULED;
			timer_modified = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return timer_modified;
}

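/*
 * cancel_delayed_work(dw)
 *
 *	Cancel dw without waiting: remove it from the queue or stop its
 *	callout if possible.  Returns true if the work was scheduled or
 *	queued and has now been cancelled, false if there was nothing to
 *	cancel or the work had already started to run.
 */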
bool
cancel_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  */
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			if (callout_stop(&dw->dw_callout)) {
				/*
				 * Too late to stop, but we got in
				 * before the callout acquired the
				 * lock.  Tell it to give up.
				 */
				dw->dw_state = DELAYED_WORK_CANCELLED;
			} else {
				/* Stopped it.  Kill it.  */
				callout_destroy(&dw->dw_callout);
				TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
				dw->dw_state = DELAYED_WORK_IDLE;
			}
			cancelled_p = true;
			break;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

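/*
 * cancel_delayed_work_sync(dw)
 *
 *	Like cancel_delayed_work(), but wait for a running work item or
 *	an in-flight callout to complete before returning.  Returns true
 *	if the work was cancelled before it ran.
 */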
bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool cancelled_p;

	/* If there's no workqueue, nothing to cancel.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		cancelled_p = false;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work == &dw->work) {
				/* Too late, it's already running.  Wait.  */
				do {
					cv_wait(&wq->wq_cv, &wq->wq_lock);
				} while (wq->wq_current_work == &dw->work);
				cancelled_p = false;
			} else {
				/* Got in before it started.  Remove it.  */
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
				cancelled_p = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			/*
			 * The callout is scheduled or already running.
			 * Tell it to give up, and wait for it to
			 * complete.  callout_halt drops the lock while
			 * it waits, so we must re-examine the state
			 * once it returns.
			 */
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	return cancelled_p;
}

/*
 * Flush
 */

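/*
 * flush_scheduled_work()
 *
 *	Wait for the system workqueue's worker thread to finish its
 *	current batch of work.
 */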
void
flush_scheduled_work(void)
{

	flush_workqueue(system_wq);
}

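/*
 * flush_workqueue(wq)
 *
 *	Block until wq's worker thread completes a batch of work, as
 *	indicated by a change in the batch generation number.
 */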
void
flush_workqueue(struct workqueue_struct *wq)
{
	uint64_t gen;

	mutex_enter(&wq->wq_lock);
	gen = wq->wq_gen;
	do {
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	} while (gen == wq->wq_gen);
	mutex_exit(&wq->wq_lock);
}

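/*
 * flush_work(work)
 *
 *	If work is associated with a workqueue, flush that workqueue and
 *	return true; otherwise return false.
 */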
bool
flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = work->work_queue) == NULL)
		return false;

	flush_workqueue(wq);
	return true;
}

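/*
 * flush_delayed_work(dw)
 *
 *	If dw is associated with a workqueue, stop any pending callout,
 *	pull the work off the queue if it has not yet started, and
 *	otherwise wait for the worker to finish its current batch.
 *	Returns true if dw had a workqueue, false if there was nothing
 *	to flush.
 */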
bool
flush_delayed_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;
	bool do_flush = false;

	/* If there's no workqueue, nothing to flush.  */
	if ((wq = dw->work.work_queue) == NULL)
		return false;

	mutex_enter(&wq->wq_lock);
	if (__predict_false(dw->work.work_queue != wq)) {
		do_flush = true;
	} else {
retry:		switch (dw->dw_state) {
		case DELAYED_WORK_IDLE:
			if (wq->wq_current_work != &dw->work) {
				TAILQ_REMOVE(&wq->wq_queue, &dw->work,
				    work_entry);
			} else {
				do_flush = true;
			}
			break;
		case DELAYED_WORK_SCHEDULED:
		case DELAYED_WORK_RESCHEDULED:
		case DELAYED_WORK_CANCELLED:
			dw->dw_state = DELAYED_WORK_CANCELLED;
			callout_halt(&dw->dw_callout, &wq->wq_lock);
			goto retry;
		default:
			panic("invalid delayed work state: %d",
			    dw->dw_state);
		}
	}
	mutex_exit(&wq->wq_lock);

	if (do_flush)
		flush_workqueue(wq);

	return true;
}
    739