/*	$NetBSD: linux_tasklet.c,v 1.10 2021/12/27 14:57:30 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.10 2021/12/27 14:57:30 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu * */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}

static void
tasklet_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc;

	*tcp = tc = kmem_zalloc(sizeof(*tc), KM_SLEEP);
	SIMPLEQ_INIT(&tc->tc_head);
}

static void
tasklet_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc = *tcp;

	KASSERT(SIMPLEQ_EMPTY(&tc->tc_head));
	kmem_free(tc, sizeof(*tc));
	*tcp = NULL;		/* paranoia */
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_create cannot fail.  */
	tq->tq_percpu = percpu_create(sizeof(struct tasklet_cpu *),
	    tasklet_cpu_init, tasklet_cpu_fini, NULL);
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu *));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu *));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu **tcp, *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one fell swoop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu **tcp, *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
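
/*
 * Illustrative sketch, not part of this file's API: a hypothetical
 * driver "foo" might embed a tasklet in its softc and initialize it at
 * attach time roughly as below.  All foo_* names are invented for the
 * example only.
 *
 *	struct foo_softc {
 *		struct tasklet_struct	sc_tasklet;
 *		unsigned		sc_nrun;
 *	};
 *
 *	static void
 *	foo_deferred(unsigned long arg)
 *	{
 *		struct foo_softc *sc = (struct foo_softc *)arg;
 *
 *		sc->sc_nrun++;
 *	}
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *
 *		tasklet_init(&sc->sc_tasklet, &foo_deferred,
 *		    (unsigned long)sc);
 *	}
 */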

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}
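
/*
 * Illustrative sketch, continuing the hypothetical foo example: a hard
 * interrupt handler typically defers its work by scheduling the
 * tasklet; tasklet_hi_schedule is used the same way when lower latency
 * is wanted.  foo_intr is invented for the example.
 *
 *	static int
 *	foo_intr(void *cookie)
 *	{
 *		struct foo_softc *sc = cookie;
 *
 *		tasklet_schedule(&sc->sc_tasklet);
 *		return 1;
 *	}
 */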

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count.  */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}
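
/*
 * Illustrative sketch, continuing the hypothetical foo example: a
 * thread can bracket a short critical section with
 * tasklet_disable/tasklet_enable to keep foo_deferred from running
 * while it updates state the tasklet reads.  Because a disabled but
 * scheduled tasklet makes the softint requeue itself in a busy-wait,
 * the bracketed section should be brief.  sc_config is invented for
 * the example.
 *
 *	static void
 *	foo_set_config(struct foo_softc *sc, unsigned newval)
 *	{
 *
 *		tasklet_disable(&sc->sc_tasklet);
 *		sc->sc_config = newval;
 *		tasklet_enable(&sc->sc_tasklet);
 *	}
 */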

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}
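
/*
 * Illustrative sketch, continuing the hypothetical foo example: at
 * detach time the driver first cuts off every source of
 * tasklet_schedule calls (here by disestablishing its interrupt
 * handler, via the invented foo_intr_disestablish), then uses
 * tasklet_kill to wait for any pending or running invocation before
 * freeing the softc.
 *
 *	static void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *
 *		foo_intr_disestablish(sc);
 *		tasklet_kill(&sc->sc_tasklet);
 *		kmem_free(sc, sizeof(*sc));
 *	}
 */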

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
		state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}
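
/*
 * Illustrative sketch of the release/acquire pairing, using the
 * hypothetical foo example: stores performed by the tasklet function
 * before it returns are visible to a thread once tasklet_unlock_wait
 * (or tasklet_disable, which calls it) returns, because tasklet_unlock
 * releases before clearing RUNNING and the load above is an acquire.
 *
 *	softint: foo_deferred			thread
 *	---------------------			--------------------------
 *	sc->sc_nrun++;
 *	(return; tasklet_unlock
 *	 clears RUNNING, release)
 *						tasklet_unlock_wait(
 *						    &sc->sc_tasklet);
 *						(sees RUNNING clear, acquire;
 *						 observes updated sc_nrun)
 */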

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver plays fast and loose with the tasklet abstraction,
 * so the helpers below exist only to accommodate it.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
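
/*
 * Illustrative sketch, in the spirit of the i915 callers (hypothetical
 * code, foo_* names invented): __tasklet_disable_sync_once and
 * __tasklet_enable_sync_once are used as a strictly nested pair around
 * a region where the tasklet must not run; only the first disable
 * waits for a running instance, and only the last enable kills the
 * tasklet.
 *
 *	__tasklet_disable_sync_once(&sc->sc_tasklet);
 *	foo_reset_hardware(sc);
 *	__tasklet_enable_sync_once(&sc->sc_tasklet);
 */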

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}
    696