/*	$NetBSD: linux_work.c,v 1.2 2018/08/27 06:55:23 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_work.c,v 1.2 2018/08/27 06:55:23 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>
#include <sys/cpu.h>

#include <machine/lock.h>

#include <linux/workqueue.h>

/* XXX Kludge until we sync with HEAD.  */
#if DIAGNOSTIC
#define	__diagused
#else
#define	__diagused	__unused
#endif

struct workqueue_struct {
	struct workqueue		*wq_workqueue;

	/* XXX The following should all be per-CPU.  */
	kmutex_t			wq_lock;

	/*
	 * Condvar for when any state related to this workqueue
	 * changes.  XXX Could split this into multiple condvars for
	 * different purposes, but whatever...
	 */
	kcondvar_t			wq_cv;

	TAILQ_HEAD(, delayed_work)	wq_delayed;
	struct work_struct		*wq_current_work;
};
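
/*
 * Locking summary, as implemented below: wq_lock protects wq_delayed
 * and wq_current_work.  Each work item carries its own lock for its
 * w_state (see "Work locking" below).  wq_cv is broadcast whenever an
 * item finishes, a cancelled item returns to idle, or a delayed item's
 * callout is cancelled, so waiters recheck their condition in a loop.
 */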

static void	linux_work_lock_init(struct work_struct *);
static void	linux_work_lock(struct work_struct *);
static void	linux_work_unlock(struct work_struct *);
static bool	linux_work_locked(struct work_struct *) __diagused;

static void	linux_wq_barrier(struct work_struct *);

static void	linux_wait_for_cancelled_work(struct work_struct *);
static void	linux_wait_for_invoked_work(struct work_struct *);
static void	linux_worker(struct work *, void *);

static void	linux_cancel_delayed_work_callout(struct delayed_work *, bool);
static void	linux_wait_for_delayed_cancelled_work(struct delayed_work *);
static void	linux_worker_intr(void *);

struct workqueue_struct		*system_wq;
struct workqueue_struct		*system_long_wq;

int
linux_workqueue_init(void)
{
	int error;

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL)
		goto fail0;

	system_long_wq = alloc_ordered_workqueue("lnxlngwq", 0);
	if (system_long_wq == NULL)
		goto fail1;

	return 0;

fail2: __unused
	destroy_workqueue(system_long_wq);
fail1:	destroy_workqueue(system_wq);
fail0:	return ENOMEM;
}

void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_long_wq);
	system_long_wq = NULL;
	destroy_workqueue(system_wq);
	system_wq = NULL;
}

/*
 * Workqueues
 */

struct workqueue_struct *
alloc_ordered_workqueue(const char *name, int linux_flags)
{
	struct workqueue_struct *wq;
	int flags = WQ_MPSAFE;
	int error;

	KASSERT(linux_flags == 0);

	wq = kmem_alloc(sizeof(*wq), KM_SLEEP);
	error = workqueue_create(&wq->wq_workqueue, name, &linux_worker,
	    wq, PRI_NONE, IPL_VM, flags);
	if (error) {
		kmem_free(wq, sizeof(*wq));
		return NULL;
	}

	mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&wq->wq_cv, name);
	TAILQ_INIT(&wq->wq_delayed);
	wq->wq_current_work = NULL;

	return wq;
}

void
destroy_workqueue(struct workqueue_struct *wq)
{

	/*
	 * Cancel all delayed work.
	 */
	for (;;) {
		struct delayed_work *dw;

		mutex_enter(&wq->wq_lock);
		if (TAILQ_EMPTY(&wq->wq_delayed)) {
			dw = NULL;
		} else {
			dw = TAILQ_FIRST(&wq->wq_delayed);
			TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		}
		mutex_exit(&wq->wq_lock);

		if (dw == NULL)
			break;

		cancel_delayed_work_sync(dw);
	}

	/*
	 * workqueue_destroy empties the queue; we need not wait for
	 * completion explicitly.  However, we can't destroy the
	 * condvar or mutex until this is done.
	 */
	workqueue_destroy(wq->wq_workqueue);
	KASSERT(wq->wq_current_work == NULL);
	wq->wq_workqueue = NULL;

	cv_destroy(&wq->wq_cv);
	mutex_destroy(&wq->wq_lock);

	kmem_free(wq, sizeof(*wq));
}
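
/*
 * Usage sketch (illustrative only, kept under #if 0): one way a caller
 * might drive the lifecycle above.  The example_* names are
 * hypothetical; INIT_WORK and queue_work are defined later in this
 * file.
 */
#if 0
static struct workqueue_struct *example_wq;
static struct work_struct example_task;

static void
example_task_fn(struct work_struct *work)
{

	printf("example task ran\n");
}

static int
example_start(void)
{

	example_wq = alloc_ordered_workqueue("examplewq", 0);
	if (example_wq == NULL)
		return ENOMEM;
	INIT_WORK(&example_task, &example_task_fn);
	(void)queue_work(example_wq, &example_task);
	return 0;
}

static void
example_stop(void)
{

	/* Cancels delayed work and drains the queue before freeing.  */
	destroy_workqueue(example_wq);
	example_wq = NULL;
}
#endif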

/*
 * Flush
 *
 * Note:  This doesn't cancel or wait for delayed work.  This seems to
 * match what Linux does (or, doesn't do).
 */

void
flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}

struct wq_flush_work {
	struct work_struct	wqfw_work;
	struct wq_flush		*wqfw_flush;
};

struct wq_flush {
	kmutex_t	wqf_lock;
	kcondvar_t	wqf_cv;
	unsigned int	wqf_n;
};

void
flush_work(struct work_struct *work)
{
	struct workqueue_struct *const wq = work->w_wq;

	if (wq != NULL)
		flush_workqueue(wq);
}

void
flush_workqueue(struct workqueue_struct *wq)
{
	static const struct wq_flush zero_wqf;
	struct wq_flush wqf = zero_wqf;

	mutex_init(&wqf.wqf_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&wqf.wqf_cv, "lnxwflsh");

	if (1) {
		struct wq_flush_work *const wqfw = kmem_zalloc(sizeof(*wqfw),
		    KM_SLEEP);

		wqf.wqf_n = 1;
		wqfw->wqfw_flush = &wqf;
		INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
		wqfw->wqfw_work.w_wq = wq;
		wqfw->wqfw_work.w_state = WORK_PENDING;
		workqueue_enqueue(wq->wq_workqueue, &wqfw->wqfw_work.w_wk,
		    NULL);
	} else {
		struct cpu_info *ci;
		CPU_INFO_ITERATOR cii;
		struct wq_flush_work *wqfw;

		panic("per-CPU Linux workqueues don't work yet!");

		wqf.wqf_n = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			wqfw = kmem_zalloc(sizeof(*wqfw), KM_SLEEP);
			mutex_enter(&wqf.wqf_lock);
			wqf.wqf_n++;
			mutex_exit(&wqf.wqf_lock);
			wqfw->wqfw_flush = &wqf;
			INIT_WORK(&wqfw->wqfw_work, &linux_wq_barrier);
			wqfw->wqfw_work.w_state = WORK_PENDING;
			wqfw->wqfw_work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue,
			    &wqfw->wqfw_work.w_wk, ci);
		}
	}

	mutex_enter(&wqf.wqf_lock);
	while (0 < wqf.wqf_n)
		cv_wait(&wqf.wqf_cv, &wqf.wqf_lock);
	mutex_exit(&wqf.wqf_lock);

	cv_destroy(&wqf.wqf_cv);
	mutex_destroy(&wqf.wqf_lock);
}
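
/*
 * Usage sketch (illustrative only, kept under #if 0): as noted above,
 * flushing does not cancel or wait for delayed work, so a caller that
 * wants the queue fully idle cancels its own delayed work first and
 * then flushes.  example_quiesce is a hypothetical name.
 */
#if 0
static void
example_quiesce(struct workqueue_struct *wq, struct delayed_work *dw)
{

	/* Stop the timer, or wait the handler out if it already fired.  */
	(void)cancel_delayed_work_sync(dw);

	/* Wait for everything already submitted to this queue.  */
	flush_workqueue(wq);
}
#endif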

static void
linux_wq_barrier(struct work_struct *work)
{
	struct wq_flush_work *const wqfw = container_of(work,
	    struct wq_flush_work, wqfw_work);
	struct wq_flush *const wqf = wqfw->wqfw_flush;

	mutex_enter(&wqf->wqf_lock);
	if (--wqf->wqf_n == 0)
		cv_broadcast(&wqf->wqf_cv);
	mutex_exit(&wqf->wqf_lock);

	kmem_free(wqfw, sizeof(*wqfw));
}

/*
 * Work locking
 *
 * We use __cpu_simple_lock(9) rather than mutex(9) because Linux code
 * does not destroy work, so there is nowhere to call mutex_destroy.
 *
 * XXX This is getting out of hand...  Really, work items shouldn't
 * have locks in them at all; instead the workqueues should.
 */

static void
linux_work_lock_init(struct work_struct *work)
{

	__cpu_simple_lock_init(&work->w_lock);
}

static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE.  */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}

static void
linux_work_unlock(struct work_struct *work)
{
	struct cpu_info *ci;
	int s;

	__cpu_simple_unlock(&work->w_lock);

	/* XXX Copypasta of MUTEX_SPIN_SPLRESTORE.  */
	ci = curcpu();
	s = ci->ci_mtx_oldspl;
	__insn_barrier();
	if (++ci->ci_mtx_count == 0)
		splx(s);
}

static bool __diagused
linux_work_locked(struct work_struct *work)
{
	return __SIMPLELOCK_LOCKED_P(&work->w_lock);
}

/*
 * Work
 */

void
INIT_WORK(struct work_struct *work, void (*fn)(struct work_struct *))
{

	linux_work_lock_init(work);
	work->w_state = WORK_IDLE;
	work->w_wq = NULL;
	work->w_fn = fn;
}

bool
schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}

bool
queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	/* True if we put it on the queue, false if it was already there.  */
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		work->w_state = WORK_PENDING;
		work->w_wq = wq;
		workqueue_enqueue(wq->wq_workqueue, &work->w_wk, NULL);
		newly_queued = true;
		break;

	case WORK_DELAYED:
		panic("queue_work(delayed work %p)", work);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
		newly_queued = false;
		break;

	case WORK_DELAYED_CANCELLED:
		panic("queue_work(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return newly_queued;
}

bool
cancel_work_sync(struct work_struct *work)
{
	bool cancelled_p = false;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:		/* Nothing to do.  */
		break;

	case WORK_DELAYED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	case WORK_PENDING:
		work->w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(work);
		break;

	case WORK_CANCELLED:	/* Already done.  */
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancel_work_sync(delayed work %p)", work);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);

	return cancelled_p;
}
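
/*
 * Usage sketch (illustrative only, kept under #if 0): the round trip
 * through the state machine above.  INIT_WORK leaves an item
 * WORK_IDLE; schedule_work/queue_work moves it to WORK_PENDING;
 * linux_worker marks it WORK_INVOKED before calling the handler, and
 * queue_work treats WORK_INVOKED like WORK_IDLE, so the item can be
 * resubmitted after it runs.  The example_* names are hypothetical.
 */
#if 0
struct example_softc {
	struct work_struct	sc_work;
	unsigned int		sc_count;
};

static void
example_work(struct work_struct *work)
{
	struct example_softc *const sc = container_of(work,
	    struct example_softc, sc_work);

	sc->sc_count++;		/* runs in the workqueue thread */
}

static void
example_attach(struct example_softc *sc)
{

	INIT_WORK(&sc->sc_work, &example_work);
	(void)schedule_work(&sc->sc_work);	/* queued on system_wq */
}

static void
example_detach(struct example_softc *sc)
{

	/* Dequeues a pending item or waits out a running one.  */
	(void)cancel_work_sync(&sc->sc_work);
}
#endif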

static void
linux_wait_for_cancelled_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_CANCELLED);

	wq = work->w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(work);
	} while ((work->w_state == WORK_CANCELLED) && (work->w_wq == wq));
}

static void
linux_wait_for_invoked_work(struct work_struct *work)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(work));
	KASSERT(work->w_state == WORK_INVOKED);

	wq = work->w_wq;
	mutex_enter(&wq->wq_lock);
	linux_work_unlock(work);
	while (wq->wq_current_work == work)
		cv_wait(&wq->wq_cv, &wq->wq_lock);
	mutex_exit(&wq->wq_lock);

	linux_work_lock(work);	/* XXX needless relock */
}

static void
linux_worker(struct work *wk, void *arg)
{
	struct work_struct *const work = container_of(wk, struct work_struct,
	    w_wk);
	struct workqueue_struct *const wq = arg;

	linux_work_lock(work);
	switch (work->w_state) {
	case WORK_IDLE:
		panic("idle work %p got queued: %p", work, wq);
		break;

	case WORK_DELAYED:
		panic("delayed work %p got queued: %p", work, wq);
		break;

	case WORK_PENDING:
		KASSERT(work->w_wq == wq);

		/* Get ready to invoke this one.  */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_INVOKED;
		KASSERT(wq->wq_current_work == NULL);
		wq->wq_current_work = work;
		mutex_exit(&wq->wq_lock);

		/* Unlock it and do it.  Can't use work after this.  */
		linux_work_unlock(work);
		(*work->w_fn)(work);

		/* All done.  Notify anyone waiting for completion.  */
		mutex_enter(&wq->wq_lock);
		KASSERT(wq->wq_current_work == work);
		wq->wq_current_work = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		return;

	case WORK_INVOKED:
		panic("invoked work %p got requeued: %p", work, wq);
		break;

	case WORK_CANCELLED:
		KASSERT(work->w_wq == wq);

		/* Return to idle; notify anyone waiting for cancellation.  */
		mutex_enter(&wq->wq_lock);
		work->w_state = WORK_IDLE;
		work->w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
		break;

	case WORK_DELAYED_CANCELLED:
		panic("cancelled delayed work %p got queued: %p", work, wq);
		break;

	default:
		panic("work %p in bad state: %d", work, (int)work->w_state);
		break;
	}
	linux_work_unlock(work);
}

/*
 * Delayed work
 */
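
/*
 * Usage sketch (illustrative only, kept under #if 0): a delayed_work
 * is a work_struct driven by a callout(9); when the callout fires,
 * linux_worker_intr hands the item to the workqueue that scheduled it.
 * The example_* names are hypothetical.
 */
#if 0
static struct delayed_work example_dw;

static void
example_timeout(struct work_struct *work)
{

	printf("example delayed work ran\n");
}

static void
example_arm(unsigned long ticks)
{

	INIT_DELAYED_WORK(&example_dw, &example_timeout);
	(void)schedule_delayed_work(&example_dw, ticks);
}

static void
example_disarm(void)
{

	/* Stops a pending callout or waits for a running handler.  */
	(void)cancel_delayed_work_sync(&example_dw);
}
#endif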

void
INIT_DELAYED_WORK(struct delayed_work *dw, void (*fn)(struct work_struct *))
{
	INIT_WORK(&dw->work, fn);
}

bool
schedule_delayed_work(struct delayed_work *dw, unsigned long ticks)
{
	return queue_delayed_work(system_wq, dw, ticks);
}

bool
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool newly_queued;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now.  */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		newly_queued = true;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Leave it to time out
		 * whenever it was going to time out, as Linux does --
		 * neither speed it up nor postpone it.
		 */
		newly_queued = false;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		newly_queued = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue?  */
		newly_queued = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return newly_queued;
}

bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now.  */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		timer_modified = false;
		break;

	case WORK_DELAYED:
		/*
		 * Timer is already ticking.  Reschedule it.
		 */
		callout_schedule(&dw->dw_callout, ticks);
		timer_modified = true;
		break;

	case WORK_PENDING:
		KASSERT(dw->work.w_wq == wq);
		timer_modified = false;
		break;

	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue?  */
		timer_modified = false;
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return timer_modified;
}
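
/*
 * Usage sketch (illustrative only, kept under #if 0): the difference
 * between the two entry points above shows up while the timer is
 * still ticking.  queue_delayed_work leaves an armed timer alone;
 * mod_delayed_work reschedules it to the new delay.
 * example_restart_timeout is a hypothetical name.
 */
#if 0
static void
example_restart_timeout(struct delayed_work *dw, unsigned long ticks)
{

	/*
	 * Unlike queue_delayed_work(system_wq, dw, ticks), which would
	 * be a no-op if the timer is already armed, this moves the
	 * deadline to `ticks' from now.
	 */
	(void)mod_delayed_work(system_wq, dw, ticks);
}
#endif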

bool
cancel_delayed_work(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do.  */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, false);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		cancelled_p = true;
		break;

	case WORK_INVOKED:	/* Don't wait!  */
		break;

	case WORK_CANCELLED:	/* Already done.  */
	case WORK_DELAYED_CANCELLED:
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

bool
cancel_delayed_work_sync(struct delayed_work *dw)
{
	bool cancelled_p = false;

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:		/* Nothing to do.  */
		break;

	case WORK_DELAYED:
		dw->work.w_state = WORK_DELAYED_CANCELLED;
		linux_cancel_delayed_work_callout(dw, true);
		cancelled_p = true;
		break;

	case WORK_PENDING:
		dw->work.w_state = WORK_CANCELLED;
		linux_wait_for_cancelled_work(&dw->work);
		cancelled_p = true;
		break;

	case WORK_INVOKED:
		linux_wait_for_invoked_work(&dw->work);
		break;

	case WORK_CANCELLED:	/* Already done.  */
		break;

	case WORK_DELAYED_CANCELLED:
		linux_wait_for_delayed_cancelled_work(dw);
		break;

	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return cancelled_p;
}

static void
linux_cancel_delayed_work_callout(struct delayed_work *dw, bool wait)
{
	bool fired_p;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	if (wait) {
		/*
		 * We unlock, halt, and then relock, rather than
		 * passing an interlock to callout_halt, for two
		 * reasons:
		 *
		 * (1) The work lock is not a mutex(9), so we can't use it.
		 * (2) The WORK_DELAYED_CANCELLED state serves as an interlock.
		 */
		linux_work_unlock(&dw->work);
		fired_p = callout_halt(&dw->dw_callout, NULL);
		linux_work_lock(&dw->work);
	} else {
		fired_p = callout_stop(&dw->dw_callout);
	}

	/*
	 * fired_p means we didn't cancel the callout, so it must have
	 * already begun and will clean up after itself.
	 *
	 * !fired_p means we cancelled it so we have to clean up after
	 * it.  Nobody else should have changed the state in that case.
	 */
	if (!fired_p) {
		struct workqueue_struct *wq;

		KASSERT(linux_work_locked(&dw->work));
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

		wq = dw->work.w_wq;
		mutex_enter(&wq->wq_lock);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		callout_destroy(&dw->dw_callout);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
	}
}

static void
linux_wait_for_delayed_cancelled_work(struct delayed_work *dw)
{
	struct workqueue_struct *wq;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	wq = dw->work.w_wq;
	do {
		mutex_enter(&wq->wq_lock);
		linux_work_unlock(&dw->work);
		cv_wait(&wq->wq_cv, &wq->wq_lock);
		mutex_exit(&wq->wq_lock);
		linux_work_lock(&dw->work);
	} while ((dw->work.w_state == WORK_DELAYED_CANCELLED) &&
	    (dw->work.w_wq == wq));
}

static void
linux_worker_intr(void *arg)
{
	struct delayed_work *dw = arg;
	struct workqueue_struct *wq;

	linux_work_lock(&dw->work);

	KASSERT((dw->work.w_state == WORK_DELAYED) ||
	    (dw->work.w_state == WORK_DELAYED_CANCELLED));

	wq = dw->work.w_wq;
	mutex_enter(&wq->wq_lock);

	/* Queue the work, or return it to idle and alert any cancellers.  */
	if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
		dw->work.w_state = WORK_PENDING;
		workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
		    NULL);
	} else {
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
	}

	/* Either way, the callout is done.  */
	TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
	callout_destroy(&dw->dw_callout);

	mutex_exit(&wq->wq_lock);
	linux_work_unlock(&dw->work);
}