/*	$NetBSD: linux_tasklet.c,v 1.9 2021/12/19 12:44:43 riastradh Exp $	*/

/*-
 * Copyright (c) 2018, 2020, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_tasklet.c,v 1.9 2021/12/19 12:44:43 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/queue.h>

#include <lib/libkern/libkern.h>

#include <machine/limits.h>

#include <linux/tasklet.h>

#define	TASKLET_SCHEDULED	((unsigned)__BIT(0))
#define	TASKLET_RUNNING		((unsigned)__BIT(1))

struct tasklet_queue {
	struct percpu	*tq_percpu;	/* struct tasklet_cpu * */
	void		*tq_sih;
};

SIMPLEQ_HEAD(tasklet_head, tasklet_struct);

struct tasklet_cpu {
	struct tasklet_head	tc_head;
};

static struct tasklet_queue	tasklet_queue __read_mostly;
static struct tasklet_queue	tasklet_hi_queue __read_mostly;

static void	tasklet_softintr(void *);
static int	tasklet_queue_init(struct tasklet_queue *, unsigned);
static void	tasklet_queue_fini(struct tasklet_queue *);
static void	tasklet_queue_schedule(struct tasklet_queue *,
		    struct tasklet_struct *);
static void	tasklet_queue_enqueue(struct tasklet_queue *,
		    struct tasklet_struct *);

/*
 * linux_tasklets_init()
 *
 *	Initialize the Linux tasklets subsystem.  Return 0 on success,
 *	error code on failure.
 */
int
linux_tasklets_init(void)
{
	int error;

	error = tasklet_queue_init(&tasklet_queue, SOFTINT_CLOCK);
	if (error)
		goto fail0;
	error = tasklet_queue_init(&tasklet_hi_queue, SOFTINT_SERIAL);
	if (error)
		goto fail1;

	/* Success!  */
	return 0;

fail2: __unused
	tasklet_queue_fini(&tasklet_hi_queue);
fail1:	tasklet_queue_fini(&tasklet_queue);
fail0:	KASSERT(error);
	return error;
}

/*
 * linux_tasklets_fini()
 *
 *	Finalize the Linux tasklets subsystem.  All use of tasklets
 *	must be done.
 */
void
linux_tasklets_fini(void)
{

	tasklet_queue_fini(&tasklet_hi_queue);
	tasklet_queue_fini(&tasklet_queue);
}
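
/*
 * Example usage (illustrative sketch only, not part of this file):
 * callers are expected to bracket all tasklet use between
 * linux_tasklets_init and linux_tasklets_fini, e.g. from a module's
 * modcmd hook.  The example_modcmd glue below is hypothetical.
 */
#if 0
static int
example_modcmd(modcmd_t cmd, void *arg)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
		/* Set up the tasklet queues before any tasklet_schedule.  */
		return linux_tasklets_init();
	case MODULE_CMD_FINI:
		/* All tasklets must be killed and idle by this point.  */
		linux_tasklets_fini();
		return 0;
	default:
		return ENOTTY;
	}
}
#endif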

static void
tasklet_cpu_init(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc;

	*tcp = tc = kmem_zalloc(sizeof(*tc), KM_SLEEP);
	SIMPLEQ_INIT(&tc->tc_head);
}

static void
tasklet_cpu_fini(void *ptr, void *cookie, struct cpu_info *ci)
{
	struct tasklet_cpu **tcp = ptr, *tc = *tcp;

	KASSERT(SIMPLEQ_EMPTY(&tc->tc_head));
	kmem_free(tc, sizeof(*tc));
	*tcp = NULL;		/* paranoia */
}

/*
 * tasklet_queue_init(tq, prio)
 *
 *	Initialize the tasklet queue tq for running tasklets at softint
 *	priority prio (SOFTINT_*).
 */
static int
tasklet_queue_init(struct tasklet_queue *tq, unsigned prio)
{
	int error;

	/* Allocate per-CPU memory.  percpu_create cannot fail.  */
	tq->tq_percpu = percpu_create(sizeof(struct tasklet_cpu),
	    tasklet_cpu_init, tasklet_cpu_fini, NULL);
	KASSERT(tq->tq_percpu != NULL);

	/* Try to establish a softint.  softint_establish may fail.  */
	tq->tq_sih = softint_establish(prio|SOFTINT_MPSAFE, &tasklet_softintr,
	    tq);
	if (tq->tq_sih == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	/* Success!  */
	return 0;

fail2: __unused
	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
fail1:	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
fail0: __unused
	KASSERT(error);
	return error;
}

/*
 * tasklet_queue_fini(tq)
 *
 *	Finalize the tasklet queue tq: free all resources associated
 *	with it.
 */
static void
tasklet_queue_fini(struct tasklet_queue *tq)
{

	softint_disestablish(tq->tq_sih);
	tq->tq_sih = NULL;
	percpu_free(tq->tq_percpu, sizeof(struct tasklet_cpu));
	tq->tq_percpu = NULL;
}

/*
 * tasklet_softintr(cookie)
 *
 *	Soft interrupt handler: Process queued tasklets on the tasklet
 *	queue passed in as cookie.
 */
static void
tasklet_softintr(void *cookie)
{
	struct tasklet_queue *const tq = cookie;
	struct tasklet_head th = SIMPLEQ_HEAD_INITIALIZER(th);
	struct tasklet_cpu **tcp, *tc;
	int s;

	/*
	 * With all interrupts deferred, transfer the current CPU's
	 * queue of tasklets to a local variable in one swell foop.
	 *
	 * No memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_CONCAT(&th, &tc->tc_head);
	splx(s);
	percpu_putref(tq->tq_percpu);

	/* Go through the queue of tasklets we grabbed.  */
	while (!SIMPLEQ_EMPTY(&th)) {
		struct tasklet_struct *tasklet;

		/* Remove the first tasklet from the queue.  */
		tasklet = SIMPLEQ_FIRST(&th);
		SIMPLEQ_REMOVE_HEAD(&th, tl_entry);

		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);

		/*
		 * Test and set RUNNING, in case it is already running
		 * on another CPU and got scheduled again on this one
		 * before it completed.
		 */
		if (!tasklet_trylock(tasklet)) {
			/*
			 * Put it back on the queue to run it again in
			 * a sort of busy-wait, and move on to the next
			 * one.
			 */
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/*
		 * Check whether it's currently disabled.
		 *
		 * Pairs with membar_exit in __tasklet_enable.
		 */
		if (atomic_load_acquire(&tasklet->tl_disablecount)) {
			/*
			 * Disabled: clear the RUNNING bit and requeue
			 * it, but keep it SCHEDULED.
			 */
			tasklet_unlock(tasklet);
			tasklet_queue_enqueue(tq, tasklet);
			continue;
		}

		/* Not disabled.  Clear SCHEDULED and call func.  */
		KASSERT(atomic_load_relaxed(&tasklet->tl_state) &
		    TASKLET_SCHEDULED);
		atomic_and_uint(&tasklet->tl_state, ~TASKLET_SCHEDULED);

		(*tasklet->func)(tasklet->data);

		/* Clear RUNNING to notify tasklet_disable.  */
		tasklet_unlock(tasklet);
	}
}

/*
 * tasklet_queue_schedule(tq, tasklet)
 *
 *	Schedule tasklet to run on tq.  If it was already scheduled and
 *	has not yet run, no effect.
 */
static void
tasklet_queue_schedule(struct tasklet_queue *tq,
    struct tasklet_struct *tasklet)
{
	unsigned ostate, nstate;

	/* Test and set the SCHEDULED bit.  If already set, we're done.  */
	do {
		ostate = atomic_load_relaxed(&tasklet->tl_state);
		if (ostate & TASKLET_SCHEDULED)
			return;
		nstate = ostate | TASKLET_SCHEDULED;
	} while (atomic_cas_uint(&tasklet->tl_state, ostate, nstate)
	    != ostate);

	/*
	 * Not already set and we have set it now.  Put it on the queue
	 * and kick off a softint.
	 */
	tasklet_queue_enqueue(tq, tasklet);
}

/*
 * tasklet_queue_enqueue(tq, tasklet)
 *
 *	Put tasklet on the queue tq and ensure it will run.  tasklet
 *	must be marked SCHEDULED.
 */
static void
tasklet_queue_enqueue(struct tasklet_queue *tq, struct tasklet_struct *tasklet)
{
	struct tasklet_cpu **tcp, *tc;
	int s;

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);

	/*
	 * Insert on the current CPU's queue while all interrupts are
	 * blocked, and schedule a soft interrupt to process it.  No
	 * memory barriers: CPU-local state only.
	 */
	tcp = percpu_getref(tq->tq_percpu);
	tc = *tcp;
	s = splhigh();
	SIMPLEQ_INSERT_TAIL(&tc->tc_head, tasklet, tl_entry);
	splx(s);
	softint_schedule(tq->tq_sih);
	percpu_putref(tq->tq_percpu);
}

/*
 * tasklet_init(tasklet, func, data)
 *
 *	Initialize tasklet to call func(data) when scheduled.
 *
 *	Caller is responsible for issuing the appropriate memory
 *	barriers or store releases to publish the tasklet to other CPUs
 *	before use.
 */
void
tasklet_init(struct tasklet_struct *tasklet, void (*func)(unsigned long),
    unsigned long data)
{

	atomic_store_relaxed(&tasklet->tl_state, 0);
	atomic_store_relaxed(&tasklet->tl_disablecount, 0);
	tasklet->func = func;
	tasklet->data = data;
}
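
/*
 * Example usage (illustrative sketch only, not part of this file): a
 * driver typically embeds a struct tasklet_struct in its softc and
 * points it at a handler with tasklet_init during attach.  The
 * example_softc, example_tasklet_fn, and example_attach names below
 * are hypothetical.
 */
#if 0
struct example_softc {
	struct tasklet_struct	sc_tasklet;
	/* ... other driver state ... */
};

static void
example_tasklet_fn(unsigned long data)
{
	struct example_softc *sc = (struct example_softc *)data;

	/* Runs in softint context; must not sleep.  */
	(void)sc;
}

static void
example_attach(struct example_softc *sc)
{

	/* Point the tasklet at the handler before anything schedules it.  */
	tasklet_init(&sc->sc_tasklet, &example_tasklet_fn,
	    (unsigned long)sc);
}
#endif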

/*
 * tasklet_schedule(tasklet)
 *
 *	Schedule tasklet to run at regular priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_queue, tasklet);
}
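
/*
 * Example usage (illustrative sketch only, not part of this file,
 * continuing the hypothetical example_softc above): a hard interrupt
 * handler acknowledges the device and defers the bulk of the work to
 * softint context via tasklet_schedule.
 */
#if 0
static int
example_intr(void *cookie)
{
	struct example_softc *sc = cookie;

	/*
	 * Scheduling an already-scheduled tasklet is a no-op, so
	 * calling this on every interrupt is harmless.
	 */
	tasklet_schedule(&sc->sc_tasklet);

	return 1;	/* claimed */
}
#endif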

/*
 * tasklet_hi_schedule(tasklet)
 *
 *	Schedule tasklet to run at high priority.  If it was already
 *	scheduled and has not yet run, no effect.
 */
void
tasklet_hi_schedule(struct tasklet_struct *tasklet)
{

	tasklet_queue_schedule(&tasklet_hi_queue, tasklet);
}

/*
 * tasklet_disable_nosync(tasklet)
 *
 *	Increment the disable count of tasklet, but don't wait for it
 *	to complete -- it may remain running after this returns.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable_nosync(struct tasklet_struct *tasklet)
{
	unsigned int disablecount __diagused;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
}

/*
 * tasklet_disable(tasklet)
 *
 *	Increment the disable count of tasklet, and if it was already
 *	running, busy-wait for it to complete.
 *
 *	As long as the disable count is nonzero, the tasklet's function
 *	will not run, but if already scheduled, the tasklet will remain
 *	so and the softint will repeatedly trigger itself in a sort of
 *	busy-wait, so this should be used only for short durations.
 *
 *	If tasklet is guaranteed not to be scheduled, e.g. if you have
 *	just invoked tasklet_kill, then tasklet_disable serves to wait
 *	for it to complete in case it might already be running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_disable(struct tasklet_struct *tasklet)
{

	/* Increment the disable count.  */
	tasklet_disable_nosync(tasklet);

	/* Wait for it to finish running, if it was running.  */
	tasklet_unlock_wait(tasklet);
}
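
/*
 * Example usage (illustrative sketch only, not part of this file,
 * continuing the hypothetical example_softc above): briefly fence the
 * handler out while updating state it examines.
 */
#if 0
static void
example_update(struct example_softc *sc)
{

	/*
	 * Keep the window short: a scheduled tasklet busy-waits in the
	 * softint until re-enabled.
	 */
	tasklet_disable(&sc->sc_tasklet);
	/* ... modify state shared with example_tasklet_fn ... */
	tasklet_enable(&sc->sc_tasklet);
}
#endif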

/*
 * tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.
 *
 *	Store-release semantics.
 */
void
tasklet_enable(struct tasklet_struct *tasklet)
{

	(void)__tasklet_enable(tasklet);
}

/*
 * tasklet_kill(tasklet)
 *
 *	Busy-wait for tasklet to run, if it is currently scheduled.
 *	Caller must guarantee it does not get scheduled again for this
 *	to be useful.
 */
void
tasklet_kill(struct tasklet_struct *tasklet)
{

	KASSERTMSG(!cpu_intr_p(),
	    "deadlock: soft interrupts are blocked in interrupt context");

	/* Wait for it to be removed from the queue.  */
	while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)
		SPINLOCK_BACKOFF_HOOK;

	/*
	 * No need for a memory barrier here because writes to the
	 * single state word are globally ordered, and RUNNING is set
	 * before SCHEDULED is cleared, so as long as the caller
	 * guarantees no scheduling, the only possible transitions we
	 * can witness are:
	 *
	 *	0                 -> 0
	 *	SCHEDULED         -> 0
	 *	SCHEDULED         -> RUNNING
	 *	RUNNING           -> 0
	 *	RUNNING           -> RUNNING
	 *	SCHEDULED|RUNNING -> 0
	 *	SCHEDULED|RUNNING -> RUNNING
	 */

	/* Wait for it to finish running.  */
	tasklet_unlock_wait(tasklet);
}
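
/*
 * Example usage (illustrative sketch only, not part of this file,
 * continuing the hypothetical example_softc above): a detach path
 * stops the scheduling source first, then drains the tasklet with
 * tasklet_kill before freeing the softc.
 */
#if 0
static void
example_detach(struct example_softc *sc)
{

	/* ... disable device interrupts, disestablish the handler ... */

	/* Wait for any scheduled or running instance to drain.  */
	tasklet_kill(&sc->sc_tasklet);

	/* ... free sc ... */
}
#endif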

/*
 * tasklet_is_locked(tasklet)
 *
 *	True if tasklet is currently locked.  Caller must use it only
 *	for positive assertions.
 */
bool
tasklet_is_locked(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;
}

/*
 * tasklet_trylock(tasklet)
 *
 *	Try to lock tasklet, i.e., set TASKLET_RUNNING.  Return true if
 *	we locked it, false if already locked.
 *
 *	Load-acquire semantics.
 */
bool
tasklet_trylock(struct tasklet_struct *tasklet)
{
	unsigned state;

	do {
		state = atomic_load_relaxed(&tasklet->tl_state);
		if (state & TASKLET_RUNNING)
			return false;
	} while (atomic_cas_uint(&tasklet->tl_state, state,
		state | TASKLET_RUNNING) != state);

	/* Pairs with membar_exit in tasklet_unlock.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	return true;
}

/*
 * tasklet_unlock(tasklet)
 *
 *	Unlock tasklet, i.e., clear TASKLET_RUNNING.
 *
 *	Store-release semantics.
 */
void
tasklet_unlock(struct tasklet_struct *tasklet)
{

	KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);

	/*
	 * Pairs with membar_enter in tasklet_trylock and with
	 * atomic_load_acquire in tasklet_unlock_wait.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
	atomic_and_uint(&tasklet->tl_state, ~TASKLET_RUNNING);
}

/*
 * tasklet_unlock_wait(tasklet)
 *
 *	Busy-wait until tasklet is not running.
 *
 *	Load-acquire semantics.
 */
void
tasklet_unlock_wait(const struct tasklet_struct *tasklet)
{

	/* Pairs with membar_exit in tasklet_unlock.  */
	while (atomic_load_acquire(&tasklet->tl_state) & TASKLET_RUNNING)
		SPINLOCK_BACKOFF_HOOK;
}

/*
 * BEGIN I915 HACKS
 *
 * The i915 driver severely abuses the tasklet abstraction.
 */

/*
 * __tasklet_disable_sync_once(tasklet)
 *
 *	Increment the disable count of tasklet, and if this is the
 *	first time it was disabled and it was already running,
 *	busy-wait for it to complete.
 *
 *	Caller must not care about whether the tasklet is running, or
 *	about waiting for any side effects of the tasklet to complete,
 *	if this was not the first time it was disabled.
 */
void
__tasklet_disable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Increment the disable count.  */
	disablecount = atomic_inc_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);
	KASSERT(disablecount != 0);

	/* Pairs with membar_exit in __tasklet_enable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	/*
	 * If it was zero, wait for it to finish running.  If it was
	 * not zero, caller must not care whether it was running.
	 */
	if (disablecount == 1)
		tasklet_unlock_wait(tasklet);
}

/*
 * __tasklet_enable_sync_once(tasklet)
 *
 *	Decrement the disable count of tasklet, and if it goes to zero,
 *	kill tasklet.
 */
void
__tasklet_enable_sync_once(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/* Pairs with membar_enter in __tasklet_disable_sync_once.  */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount < UINT_MAX);

	/*
	 * If it became zero, kill the tasklet.  If it was not zero,
	 * caller must not care whether it was running.
	 */
	if (disablecount == 0)
		tasklet_kill(tasklet);
}
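
/*
 * Example usage (illustrative sketch only, not part of this file,
 * continuing the hypothetical example_softc above): the sync-once pair
 * is meant for nested bracketing where only the outermost calls need
 * to synchronize with the handler.  The reset_prepare/reset_finish
 * names are hypothetical.
 */
#if 0
static void
example_reset_prepare(struct example_softc *sc)
{

	/* The first disable waits for the handler; nested ones just count.  */
	__tasklet_disable_sync_once(&sc->sc_tasklet);
}

static void
example_reset_finish(struct example_softc *sc)
{

	/* The enable that drops the count to zero also kills the tasklet.  */
	__tasklet_enable_sync_once(&sc->sc_tasklet);
}
#endif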

/*
 * __tasklet_is_enabled(tasklet)
 *
 *	True if tasklet is not currently disabled.  Answer may be stale
 *	as soon as it is returned -- caller must use it only as a hint,
 *	or must arrange synchronization externally.
 */
bool
__tasklet_is_enabled(const struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);

	return (disablecount == 0);
}

/*
 * __tasklet_is_scheduled(tasklet)
 *
 *	True if tasklet is currently scheduled.  Answer may be stale as
 *	soon as it is returned -- caller must use it only as a hint, or
 *	must arrange synchronization externally.
 */
bool
__tasklet_is_scheduled(const struct tasklet_struct *tasklet)
{

	return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;
}

/*
 * __tasklet_enable(tasklet)
 *
 *	Decrement tasklet's disable count.  If it was previously
 *	scheduled to run, it may now run.  Return true if the disable
 *	count went down to zero; otherwise return false.
 *
 *	Store-release semantics.
 */
bool
__tasklet_enable(struct tasklet_struct *tasklet)
{
	unsigned int disablecount;

	/*
	 * Guarantee all caller-relevant reads or writes have completed
	 * before potentially allowing tasklet to run again by
	 * decrementing the disable count.
	 *
	 * Pairs with atomic_load_acquire in tasklet_softintr and with
	 * membar_enter in tasklet_disable.
	 */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif

	/* Decrement the disable count.  */
	disablecount = atomic_dec_uint_nv(&tasklet->tl_disablecount);
	KASSERT(disablecount != UINT_MAX);

	return (disablecount == 0);
}
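
/*
 * Example usage (illustrative sketch only, not part of this file,
 * continuing the hypothetical example_softc above): the boolean result
 * lets a caller kick the tasklet once it is fully re-enabled.
 */
#if 0
static void
example_resume(struct example_softc *sc)
{

	/*
	 * If this drops the disable count to zero, reschedule in case
	 * work arrived while the tasklet was disabled.
	 */
	if (__tasklet_enable(&sc->sc_tasklet))
		tasklet_hi_schedule(&sc->sc_tasklet);
}
#endif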