/*	$NetBSD: kern_softint.c,v 1.31.4.2 2011/04/21 01:42:08 rmind Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic software interrupt framework.
 *
 * Overview
 *
 *	The soft interrupt framework provides a mechanism to schedule a
 *	low priority callback that runs with thread context.  It allows
 *	for dynamic registration of software interrupts, and for fair
 *	queueing and prioritization of those interrupts.  The callbacks
 *	can be scheduled to run from nearly any point in the kernel: by
 *	code running with thread context, by code running from a
 *	hardware interrupt handler, and at any interrupt priority
 *	level.
 *
 * Priority levels
 *
 *	Since soft interrupt dispatch can be tied to the underlying
 *	architecture's interrupt dispatch code, it can be limited
 *	both by the capabilities of the hardware and the capabilities
 *	of the interrupt dispatch code itself.  The number of priority
 *	levels is restricted to four.  In order of priority (lowest to
 *	highest) the levels are: clock, bio, net, serial.
 *
 *	The names are symbolic and in isolation do not have any direct
 *	connection with a particular kind of device activity: they are
 *	only meant as a guide.
 *
 *	The four priority levels map directly to scheduler priority
 *	levels, and where the architecture implements 'fast' software
 *	interrupts, they also map onto interrupt priorities.  The
 *	interrupt priorities are intended to be hidden from machine
 *	independent code, which should use thread-safe mechanisms to
 *	synchronize with software interrupts (for example: mutexes).
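 *
 *	Concretely, in this file softint_init() maps the levels to
 *	scheduler priorities as follows:
 *
 *		SOFTINT_CLOCK  -> PRI_SOFTCLOCK
 *		SOFTINT_BIO    -> PRI_SOFTBIO
 *		SOFTINT_NET    -> PRI_SOFTNET
 *		SOFTINT_SERIAL -> PRI_SOFTSERIAL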
 *
 * Capabilities
 *
 *	Software interrupts run with limited machine context.  In
 *	particular, they do not possess any address space context.  They
 *	should not try to operate on user space addresses, or to use
 *	virtual memory facilities other than those noted as interrupt
 *	safe.
 *
 *	Unlike hardware interrupts, software interrupts do have thread
 *	context.  They may block on synchronization objects, sleep, and
 *	resume execution at a later time.
 *
 *	Since software interrupts are a limited resource and run with
 *	higher priority than most other LWPs in the system, all
 *	block-and-resume activity by a software interrupt must be kept
 *	short to allow further processing at that level to continue.  By
 *	extension, code running with process context must take care to
 *	ensure that any lock that may be taken from a software interrupt
 *	can not be held for more than a short period of time.
 *
 *	The kernel does not allow software interrupts to use facilities
 *	or perform actions that may block for a significant amount of
 *	time.  This means that it's not valid for a software interrupt
 *	to sleep on condition variables or wait for resources to become
 *	available (for example, memory).
 *
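 *	As a usage sketch (illustrative only: "sc", "sc_sih" and
 *	"xyz_softintr" are hypothetical driver names), a handler is
 *	typically established once at attach time and then scheduled
 *	from the driver's hardware interrupt handler:
 *
 *		sc->sc_sih = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
 *		    xyz_softintr, sc);
 *		...
 *		softint_schedule(sc->sc_sih);
 *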
 * Per-CPU operation
 *
 *	If a soft interrupt is triggered on a CPU, it can only be
 *	dispatched on the same CPU.  Each LWP dedicated to handling a
 *	soft interrupt is bound to its home CPU, so if the LWP blocks
 *	and needs to run again, it can only run there.  Nearly all data
 *	structures used to manage software interrupts are per-CPU.
 *
 *	The per-CPU requirement is intended to reduce "ping-pong" of
 *	cache lines between CPUs: lines occupied by data structures
 *	used to manage the soft interrupts, and lines occupied by data
 *	items being passed down to the soft interrupt.  As a positive
 *	side effect, this also means that the soft interrupt dispatch
 *	code does not need to use spinlocks to synchronize.
 *
 * Generic implementation
 *
 *	A generic, low performance implementation is provided that
 *	works across all architectures, with no machine-dependent
 *	modifications needed.  This implementation uses the scheduler,
 *	and so has a number of restrictions:
 *
 *	1) The software interrupts are not currently preemptive, so
 *	must wait for the currently executing LWP to yield the CPU.
 *	This can introduce latency.
 *
 *	2) An expensive context switch is required for a software
 *	interrupt to be handled.
 *
 * 'Fast' software interrupts
 *
 *	If an architecture defines __HAVE_FAST_SOFTINTS, it implements
 *	the fast mechanism.  Threads running either in the kernel or in
 *	userspace will be interrupted, but will not be preempted.  When
 *	the soft interrupt completes execution, the interrupted LWP
 *	is resumed.  Interrupt dispatch code must provide the minimum
 *	level of context necessary for the soft interrupt to block and
 *	be resumed at a later time.  The machine-dependent dispatch
 *	path looks something like the following:
 *
 *	softintr()
 *	{
 *		go to IPL_HIGH if necessary for switch;
 *		save any necessary registers in a format that can be
 *		    restored by cpu_switchto if the softint blocks;
 *		arrange for cpu_switchto() to restore into the
 *		    trampoline function;
 *		identify LWP to handle this interrupt;
 *		switch to the LWP's stack;
 *		switch register stacks, if necessary;
 *		assign new value of curlwp;
 *		call MI softint_dispatch, passing old curlwp and IPL
 *		    to execute interrupt at;
 *		switch back to old stack;
 *		switch back to old register stack, if necessary;
 *		restore curlwp;
 *		return to interrupted LWP;
 *	}
 *
 *	If the soft interrupt blocks, a trampoline function is returned
 *	to in the context of the interrupted LWP, as arranged for by
 *	softintr():
 *
 *	softint_ret()
 *	{
 *		unlock soft interrupt LWP;
 *		resume interrupt processing, likely returning to
 *		    interrupted LWP or dispatching another, different
 *		    interrupt;
 *	}
 *
 *	Once the soft interrupt has fired (and even if it has blocked),
 *	no further soft interrupts at that level will be triggered by
 *	MI code until the soft interrupt handler has ceased execution.
 *	If a soft interrupt handler blocks and is resumed, it resumes
 *	execution as a normal LWP (kthread) and gains VM context.  Only
 *	when it has completed and is ready to fire again will it
 *	interrupt other threads.
 *
 * Future directions
 *
 *	Provide a cheap way to direct software interrupts to remote
 *	CPUs.  Provide a way to enqueue work items into the handler
 *	record, removing additional spl calls (see subr_workqueue.c).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.31.4.2 2011/04/21 01:42:08 rmind Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <net/netisr.h>

#include <uvm/uvm_extern.h>

/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand) si_q;		/* queue of pending handlers */
	struct lwp		*si_lwp;	/* dedicated handler LWP */
	struct cpu_info		*si_cpu;	/* home CPU */
	uintptr_t		si_machdep;	/* MD cookie from softint_init_md() */
	struct evcnt		si_evcnt;	/* dispatch counter */
	struct evcnt		si_evcnt_block;	/* block counter */
	int			si_active;	/* non-zero while dispatching */
	char			si_name[8];
	char			si_name_block[8+6];
} softint_t;

typedef struct softhand {
	SIMPLEQ_ENTRY(softhand)	sh_q;		/* entry on si_q while pending */
	void			(*sh_func)(void *);	/* handler function */
	void			*sh_arg;	/* argument to sh_func */
	softint_t		*sh_isr;	/* back pointer to our level */
	u_int			sh_flags;	/* SOFTINT_* flags */
} softhand_t;

typedef struct softcpu {
	struct cpu_info		*sc_cpu;
	softint_t		sc_int[SOFTINT_COUNT];
	softhand_t		sc_hand[1];	/* actually softint_max entries */
} softcpu_t;

static void	softint_thread(void *);

u_int		softint_bytes = 8192;
u_int		softint_timing;
static u_int	softint_max;
static kmutex_t	softint_lock;
static void	*softint_netisrs[NETISR_MAX];

/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%u", desc, ci->ci_index);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	snprintf(si->si_name, sizeof(si->si_name), "%s/%u", desc,
	    ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_MISC, NULL,
	   "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%u",
	    desc, ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_MISC, NULL,
	   "softint", si->si_name_block);

	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
}

/*
 * softint_init:
 *
 *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
 */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		/* Boot CPU. */
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	ci->ci_data.cpu_softints = 0;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		mutex_enter(&softint_lock);
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
		/* Update pointers for this CPU. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
		mutex_exit(&softint_lock);
	} else {
		/*
		 * Establish handlers for legacy net interrupts.
		 * XXX Needs to go away.
		 */
#define DONETISR(n, f)							\
    softint_netisrs[(n)] = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,\
        (void (*)(void *))(f), NULL)
#include <net/netisr_dispatch.h>
	}
}

/*
 * softint_establish:
 *
 *	Register a software interrupt handler.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);
	KASSERT((flags & SOFTINT_IMPMASK) == 0);

	mutex_enter(&softint_lock);

	/* Find a free slot. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++) {
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	}
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}

	/* Set up the handler on each CPU. */
	if (ncpu < 2) {
		/* XXX hack for machines with no CPU_INFO_FOREACH() early on */
		sc = curcpu()->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
	} else for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
	}

	mutex_exit(&softint_lock);

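	/*
	 * The cookie returned to the caller is the byte offset of the
	 * handler record within the per-CPU softcpu_t block, so the
	 * same cookie resolves to the matching record on every CPU.
	 */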
	return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
}

/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.  The soft interrupt could
 *	still be active at this point, but the caller commits not to try
 *	and trigger it again once this call is made.  The caller must not
 *	hold any locks that could be taken from soft interrupt context,
 *	because we will wait for the softint to complete if it's still
 *	running.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;
	uint64_t where;
	u_int flags;

	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);

	/*
	 * Run a cross call so we see up to date values of sh_flags from
	 * all CPUs.  Once softint_disestablish() is called, the caller
	 * commits to not triggering the interrupt and setting
	 * SOFTINT_ACTIVE on it again.  So, we are only looking for
	 * handler records with SOFTINT_ACTIVE already set.
	 */
	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(where);

	for (;;) {
		/* Collect flag values from each CPU. */
		flags = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sc = ci->ci_data.cpu_softcpu;
			sh = (softhand_t *)((uint8_t *)sc + offset);
			KASSERT(sh->sh_func != NULL);
			flags |= sh->sh_flags;
		}
		/* Inactive on all CPUs? */
		if ((flags & SOFTINT_ACTIVE) == 0) {
			break;
		}
		/* Oops, still active.  Wait for it to clear. */
		(void)kpause("softdis", false, 1, NULL);
	}

	/* Clear the handler on each CPU. */
	mutex_enter(&softint_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		sh->sh_func = NULL;
	}
	mutex_exit(&softint_lock);
}

/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	KASSERT(kpreempt_disabled());

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/* If it's already pending there's nothing to do. */
	if ((sh->sh_flags & SOFTINT_PENDING) != 0) {
		return;
	}

	/*
	 * Enqueue the handler into the LWP's pending list.  The check
	 * above was unlocked, so re-test SOFTINT_PENDING with interrupts
	 * blocked before committing.  If the LWP is completely idle,
	 * then make it run.
	 */
	s = splhigh();
	if ((sh->sh_flags & SOFTINT_PENDING) == 0) {
		si = sh->sh_isr;
		sh->sh_flags |= SOFTINT_PENDING;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}

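/*
 * Illustrative only: when triggering from thread context (rather than
 * from a hardware interrupt handler), a caller would pin itself to the
 * CPU around the call, for example:
 *
 *	kpreempt_disable();
 *	softint_schedule(cookie);
 *	kpreempt_enable();
 */
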
/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified, but returns back at splhigh.
 */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;
	bool havelock;

#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(si->si_lwp == curlwp);
#else
	/* May be running in user context. */
#endif
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	havelock = false;

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		KASSERT((sh->sh_flags & SOFTINT_PENDING) != 0);
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) == 0);
		sh->sh_flags ^= (SOFTINT_PENDING | SOFTINT_ACTIVE);
		splx(s);

		/*
		 * Run the handler.  Hold the kernel lock across
		 * non-MPSAFE handlers, and cache it across consecutive
		 * non-MPSAFE handlers to avoid lock churn.
		 */
		if (sh->sh_flags & SOFTINT_MPSAFE) {
			if (havelock) {
				KERNEL_UNLOCK_ONE(l);
				havelock = false;
			}
		} else if (!havelock) {
			KERNEL_LOCK(1, l);
			havelock = true;
		}
		(*sh->sh_func)(sh->sh_arg);

		/* Diagnostic: check that spin-locks have not leaked. */
		KASSERTMSG(curcpu()->ci_mtx_count == 0,
		    ("%s: ci_mtx_count (%d) != 0, sh_func %p\n",
		    __func__, curcpu()->ci_mtx_count, sh->sh_func));

		(void)splhigh();
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) != 0);
		sh->sh_flags ^= SOFTINT_ACTIVE;
	}

	if (havelock) {
		KERNEL_UNLOCK_ONE(l);
	}

	/*
	 * Unlocked, but only for statistics.
	 * Should be per-CPU to prevent cache ping-pong.
	 */
	curcpu()->ci_data.cpu_nsoft++;

	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);
	si->si_evcnt.ev_count++;
	si->si_active = 0;
}

/*
 * softint_block:
 *
 *	Update statistics when the soft interrupt blocks.
 */
void
softint_block(lwp_t *l)
{
	softint_t *si = l->l_private;

	KASSERT((l->l_pflag & LP_INTR) != 0);
	si->si_evcnt_block.ev_count++;
}

/*
 * schednetisr:
 *
 *	Trigger a legacy network interrupt.  XXX Needs to go away.
 */
void
schednetisr(int isr)
{

	softint_schedule(softint_netisrs[isr]);
}

#ifndef __HAVE_FAST_SOFTINTS

#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires __HAVE_FAST_SOFTINTS
#endif

/*
 * softint_init_md:
 *
 *	Slow path: perform machine-dependent initialization.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	softint_t *si;

	*machdep = (1 << level);
	si = l->l_private;

	lwp_lock(l);
	lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
}

/*
 * softint_trigger:
 *
 *	Slow path: cause a soft interrupt handler to begin executing.
 *	Called at IPL_HIGH.
 */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	l = curlwp;
	ci = l->l_cpu;
	ci->ci_data.cpu_softints |= machdep;
	if (l == ci->ci_data.cpu_idlelwp) {
		cpu_need_resched(ci, 0);
	} else {
		/* MI equivalent of aston() */
		cpu_signotify(l);
	}
}

/*
 * softint_thread:
 *
 *	Slow path: MI software interrupt dispatch.
 */
void
softint_thread(void *cookie)
{
	softint_t *si;
	lwp_t *l;
	int s;

	l = curlwp;
	si = l->l_private;

	for (;;) {
		/*
		 * Clear pending status and run it.  We must drop the
		 * spl before mi_switch(), since IPL_HIGH may be higher
		 * than IPL_SCHED (and it is not safe to switch at a
		 * higher level).
		 */
		s = splhigh();
		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
		softint_execute(si, l, s);
		splx(s);

		lwp_lock(l);
		l->l_stat = LSIDL;
		mi_switch(l);
	}
}

/*
 * softint_picklwp:
 *
 *	Slow path: called from mi_switch() to pick the highest priority
 *	soft interrupt LWP that needs to run.
 */
lwp_t *
softint_picklwp(void)
{
	struct cpu_info *ci;
	u_int mask;
	softint_t *si;
	lwp_t *l;

	ci = curcpu();
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
	mask = ci->ci_data.cpu_softints;
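
	/* Check the pending levels in order of decreasing priority. */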
	if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
		l = si[SOFTINT_SERIAL].si_lwp;
	} else if ((mask & (1 << SOFTINT_NET)) != 0) {
		l = si[SOFTINT_NET].si_lwp;
	} else if ((mask & (1 << SOFTINT_BIO)) != 0) {
		l = si[SOFTINT_BIO].si_lwp;
	} else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
		l = si[SOFTINT_CLOCK].si_lwp;
	} else {
		panic("softint_picklwp");
	}

	return l;
}

/*
 * softint_overlay:
 *
 *	Slow path: called from lwp_userret() to run a soft interrupt
 *	within the context of a user thread.
 */
void
softint_overlay(void)
{
	struct cpu_info *ci;
	u_int softints, oflag;
	softint_t *si;
	pri_t obase;
	lwp_t *l;
	int s;

	l = curlwp;
	KASSERT((l->l_pflag & LP_INTR) == 0);

	/*
	 * Arrange to elevate priority if the LWP blocks.  Also, bind LWP
	 * to the CPU.  Note: disable kernel preemption before doing that.
	 */
	s = splhigh();
	ci = l->l_cpu;
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;

	obase = l->l_kpribase;
	l->l_kpribase = PRI_KERNEL_RT;
	oflag = l->l_pflag;
	l->l_pflag = oflag | LP_INTR | LP_BOUND;

	while ((softints = ci->ci_data.cpu_softints) != 0) {
		if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
			softint_execute(&si[SOFTINT_SERIAL], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_NET)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
			softint_execute(&si[SOFTINT_NET], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_BIO)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
			softint_execute(&si[SOFTINT_BIO], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
			softint_execute(&si[SOFTINT_CLOCK], l, s);
			continue;
		}
	}
	l->l_pflag = oflag;
	l->l_kpribase = obase;
	splx(s);
}

#else	/* __HAVE_FAST_SOFTINTS */

/*
 * softint_thread:
 *
 *	Fast path: the LWP is switched to without restoring any state,
 *	so we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_dispatch().
 */
void
softint_thread(void *cookie)
{

	panic("softint_thread");
}

/*
 * softint_dispatch:
 *
 *	Fast path: entry point from machine-dependent code.
 */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct bintime now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

	KASSERT((pinned->l_pflag & LP_RUNNING) != 0);
	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = (softint_timing ? LP_TIMEINTR : 0);
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;
	l->l_pflag |= (LP_RUNNING | timing);

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing)
		binuptime(&l->l_stime);
	softint_execute(si, l, s);
	if (timing) {
		binuptime(&now);
		updatertime(l, &now);
		l->l_pflag &= ~LP_TIMEINTR;
	}

	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone so switch to the idle LWP.  It will select a new LWP to
	 * run.
	 *
	 * We must drop the priority level as switching at IPL_HIGH could
	 * deadlock the system.  We have already set si->si_active = 0,
	 * which means another interrupt at this level can be triggered.
	 * That's not a problem: we are lowering to level 's', which will
	 * prevent softint_dispatch() from being reentered at level 's'
	 * until the priority is finally dropped to IPL_NONE on entry to
	 * the LWP chosen by lwp_exit_switchaway().
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		splx(s);
		pmap_deactivate(l);
		lwp_exit_switchaway(l);
		/* NOTREACHED */
	}
	l->l_switchto = NULL;
	l->l_pflag &= ~LP_RUNNING;
}

#endif	/* !__HAVE_FAST_SOFTINTS */