/* kern_softint.c, revision 1.1.2.16 -- retrieved from a source browser */
      1 /*	$NetBSD: kern_softint.c,v 1.1.2.16 2007/10/09 13:44:28 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2007 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Stub for code to be merged from the vmlocking CVS branch.
     41  */
     42 
     43 #include <sys/cdefs.h>
     44 __KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.1.2.16 2007/10/09 13:44:28 ad Exp $");
     45 
     46 #include <sys/param.h>
     47 #include <sys/intr.h>
     48 
     49 u_int	softint_timing;
     50 
     51 /*
     52  * softint_init:
     53  *
     54  *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
     55  */
void
softint_init(struct cpu_info *ci)
{

	/* Stub: no per-CPU state yet; real code on the vmlocking branch. */
}
     62 
     63 /*
     64  * softint_establish:
     65  *
     66  *	Register a software interrupt handler.
     67  */
     68 void *
     69 softint_establish(u_int flags, void (*func)(void *), void *arg)
     70 {
     71 	u_int level;
     72 
     73 	level = (flags & SOFTINT_LVLMASK);
     74 	KASSERT(level < SOFTINT_COUNT);
     75 
     76 	switch (level) {
     77 	case SOFTINT_CLOCK:
     78 		level = IPL_SOFTCLOCK;
     79 		break;
     80 	case SOFTINT_NET:
     81 	case SOFTINT_BIO:
     82 		level = IPL_SOFTNET;
     83 		break;
     84 	case SOFTINT_SERIAL:
     85 #ifdef IPL_SOFTSERIAL
     86 		level = IPL_SOFTSERIAL;
     87 #else
     88 		level = IPL_SOFTNET;
     89 #endif
     90 		break;
     91 	default:
     92 		panic("softint_establish");
     93 	}
     94 
     95 	return softintr_establish(level, func, arg);
     96 }
     97 
     98 /*
     99  * softint_disestablish:
    100  *
    101  *	Unregister a software interrupt handler.
    102  */
void
softint_disestablish(void *arg)
{

	/* Pass through to the machine-dependent legacy interface. */
	softintr_disestablish(arg);
}
    109 
    110 /*
    111  * softint_schedule:
    112  *
    113  *	Trigger a software interrupt.  Must be called from a hardware
    114  *	interrupt handler, or with preemption disabled (since we are
    115  *	using the value of curcpu()).
    116  */
void
softint_schedule(void *arg)
{

	/* Pass through to the machine-dependent legacy interface. */
	softintr_schedule(arg);
}
    123 
    124 /*
    125  * softint_block:
    126  *
    127  *	Update statistics when the soft interrupt blocks.
    128  */
void
softint_block(lwp_t *l)
{

	/* Stub: statistics are not collected yet. */
}
    135 /*	$NetBSD: kern_softint.c,v 1.1.2.16 2007/10/09 13:44:28 ad Exp $	*/
    136 
    137 /*-
    138  * Copyright (c) 2007 The NetBSD Foundation, Inc.
    139  * All rights reserved.
    140  *
    141  * This code is derived from software contributed to The NetBSD Foundation
    142  * by Andrew Doran.
    143  *
    144  * Redistribution and use in source and binary forms, with or without
    145  * modification, are permitted provided that the following conditions
    146  * are met:
    147  * 1. Redistributions of source code must retain the above copyright
    148  *    notice, this list of conditions and the following disclaimer.
    149  * 2. Redistributions in binary form must reproduce the above copyright
    150  *    notice, this list of conditions and the following disclaimer in the
    151  *    documentation and/or other materials provided with the distribution.
    152  * 3. All advertising materials mentioning features or use of this software
    153  *    must display the following acknowledgement:
    154  *	This product includes software developed by the NetBSD
    155  *	Foundation, Inc. and its contributors.
    156  * 4. Neither the name of The NetBSD Foundation nor the names of its
    157  *    contributors may be used to endorse or promote products derived
    158  *    from this software without specific prior written permission.
    159  *
    160  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
    161  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    162  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    163  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
    164  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    165  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    166  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    167  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    168  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    169  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    170  * POSSIBILITY OF SUCH DAMAGE.
    171  */
    172 
    173 /*
    174  * Generic software interrupt framework.
    175  *
    176  * Overview
    177  *
    178  *	The soft interrupt framework provides a mechanism to schedule a
    179  *	low priority callback that runs with thread context.  It allows
    180  *	for dynamic registration of software interrupts, and for fair
    181  *	queueing and prioritization of those interrupts.  The callbacks
    182  *	can be scheduled to run from nearly any point in the kernel: by
    183  *	code running with thread context, by code running from a
    184  *	hardware interrupt handler, and at any interrupt priority
    185  *	level.
    186  *
    187  * Priority levels
    188  *
    189  *	Since soft interrupt dispatch can be tied to the underlying
    190  *	architecture's interrupt dispatch code, it can be limited
    191  *	both by the capabilities of the hardware and the capabilities
    192  *	of the interrupt dispatch code itself.  The number of priority
    193  *	levels is restricted to four.  In order of priority (lowest to
    194  *	highest) the levels are: clock, bio, net, serial.
    195  *
    196  *	The names are symbolic and in isolation do not have any direct
    197  *	connection with a particular kind of device activity: they are
    198  *	only meant as a guide.
    199  *
    200  *	The four priority levels map directly to scheduler priority
    201  *	levels, and where the architecture implements 'fast' software
    202  *	interrupts, they also map onto interrupt priorities.  The
    203  *	interrupt priorities are intended to be hidden from machine
    204  *	independent code, which should use thread-safe mechanisms to
    205  *	synchronize with software interrupts (for example: mutexes).
    206  *
    207  * Capabilities
    208  *
    209  *	Software interrupts run with limited machine context.  In
 *	particular, they do not possess any address space context.  They
    211  *	should not try to operate on user space addresses, or to use
    212  *	virtual memory facilities other than those noted as interrupt
    213  *	safe.
    214  *
    215  *	Unlike hardware interrupts, software interrupts do have thread
    216  *	context.  They may block on synchronization objects, sleep, and
    217  *	resume execution at a later time.
    218  *
    219  *	Since software interrupts are a limited resource and run with
    220  *	higher priority than most other LWPs in the system, all
    221  *	block-and-resume activity by a software interrupt must be kept
 *	short to allow further processing at that level to continue.  By
    223  *	extension, code running with process context must take care to
    224  *	ensure that any lock that may be taken from a software interrupt
    225  *	can not be held for more than a short period of time.
    226  *
    227  *	The kernel does not allow software interrupts to use facilities
    228  *	or perform actions that may block for a significant amount of
    229  *	time.  This means that it's not valid for a software interrupt
    230  *	to: sleep on condition variables, use the lockmgr() facility,
    231  *	or wait for resources to become available (for example,
    232  *	memory).
    233  *
    234  * Per-CPU operation
    235  *
    236  *	If a soft interrupt is triggered on a CPU, it can only be
    237  *	dispatched on the same CPU.  Each LWP dedicated to handling a
    238  *	soft interrupt is bound to its home CPU, so if the LWP blocks
    239  *	and needs to run again, it can only run there.  Nearly all data
    240  *	structures used to manage software interrupts are per-CPU.
    241  *
    242  *	The per-CPU requirement is intended to reduce "ping-pong" of
    243  *	cache lines between CPUs: lines occupied by data structures
    244  *	used to manage the soft interrupts, and lines occupied by data
    245  *	items being passed down to the soft interrupt.  As a positive
    246  *	side effect, this also means that the soft interrupt dispatch
 *	code does not need to use spinlocks to synchronize.
    248  *
    249  * Generic implementation
    250  *
    251  *	A generic, low performance implementation is provided that
    252  *	works across all architectures, with no machine-dependent
    253  *	modifications needed.  This implementation uses the scheduler,
    254  *	and so has a number of restrictions:
    255  *
    256  *	1) Since software interrupts can be triggered from any priority
    257  *	level, on architectures where the generic implementation is
    258  *	used IPL_SCHED must be equal to IPL_HIGH (it must block all
    259  *	interrupts).
    260  *
    261  *	2) The software interrupts are not currently preemptive, so
    262  *	must wait for the currently executing LWP to yield the CPU.
    263  *	This can introduce latency.
    264  *
    265  *	3) A context switch is required for each soft interrupt to be
    266  *	handled, which can be quite expensive.
    267  *
    268  * 'Fast' software interrupts
    269  *
 *	If an architecture defines __HAVE_FAST_SOFTINTS, it implements
    271  *	the fast mechanism.  Threads running either in the kernel or in
    272  *	userspace will be interrupted, but will not be preempted.  When
    273  *	the soft interrupt completes execution, the interrupted LWP
    274  *	is resumed.  Interrupt dispatch code must provide the minimum
    275  *	level of context necessary for the soft interrupt to block and
    276  *	be resumed at a later time.  The machine-dependent dispatch
    277  *	path looks something like the following:
    278  *
    279  *	softintr()
    280  *	{
    281  *		go to IPL_HIGH if necessary for switch;
    282  *		save any necessary registers in a format that can be
    283  *		    restored by cpu_switchto if the softint blocks;
    284  *		arrange for cpu_switchto() to restore into the
    285  *		    trampoline function;
    286  *		identify LWP to handle this interrupt;
    287  *		switch to the LWP's stack;
    288  *		switch register stacks, if necessary;
    289  *		assign new value of curlwp;
    290  *		call MI softint_dispatch, passing old curlwp and IPL
    291  *		    to execute interrupt at;
    292  *		switch back to old stack;
    293  *		switch back to old register stack, if necessary;
    294  *		restore curlwp;
    295  *		return to interrupted LWP;
    296  *	}
    297  *
    298  *	If the soft interrupt blocks, a trampoline function is returned
    299  *	to in the context of the interrupted LWP, as arranged for by
 *	softintr():
    301  *
    302  *	softint_ret()
    303  *	{
    304  *		unlock soft interrupt LWP;
    305  *		resume interrupt processing, likely returning to
    306  *		    interrupted LWP or dispatching another, different
    307  *		    interrupt;
    308  *	}
    309  *
    310  *	Once the soft interrupt has fired (and even if it has blocked),
    311  *	no further soft interrupts at that level will be triggered by
    312  *	MI code until the soft interrupt handler has ceased execution.
    313  *	If a soft interrupt handler blocks and is resumed, it resumes
    314  *	execution as a normal LWP (kthread) and gains VM context.  Only
    315  *	when it has completed and is ready to fire again will it
    316  *	interrupt other threads.
    317  */
    318 
    319 #include <sys/cdefs.h>
    320 __KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.1.2.16 2007/10/09 13:44:28 ad Exp $");
    321 
    322 #include <sys/param.h>
    323 #include <sys/malloc.h>
    324 #include <sys/proc.h>
    325 #include <sys/intr.h>
    326 #include <sys/mutex.h>
    327 #include <sys/kthread.h>
    328 #include <sys/evcnt.h>
    329 #include <sys/cpu.h>
    330 
    331 #include <net/netisr.h>
    332 
    333 #include <uvm/uvm_extern.h>
    334 
    335 #define	PRI_SOFTSERIAL	(PRI_COUNT - 1)
    336 #define	PRI_SOFTNET	(PRI_SOFTSERIAL - schedppq * 1)
    337 #define	PRI_SOFTBIO	(PRI_SOFTSERIAL - schedppq * 2)
    338 #define	PRI_SOFTCLOCK	(PRI_SOFTSERIAL - schedppq * 3)
    339 
/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand) si_q;		/* pending handlers, FIFO order */
	struct lwp		*si_lwp;	/* dedicated handler LWP */
	struct cpu_info		*si_cpu;	/* home CPU; softints never migrate */
	uintptr_t		si_machdep;	/* MD cookie set by softint_init_md() */
	struct evcnt		si_evcnt;	/* count of dispatches */
	struct evcnt		si_evcnt_block;	/* count of times a handler blocked */
	int			si_active;	/* nonzero from trigger until handlers drain */
	char			si_name[8];	/* evcnt name, e.g. "net/0" */
	char			si_name_block[8+6]; /* evcnt name for the block counter */
} softint_t;
    352 
typedef struct softhand {
	SIMPLEQ_ENTRY(softhand)	sh_q;		/* entry on softint_t::si_q when pending */
	void			(*sh_func)(void *); /* handler; NULL marks a free slot */
	void			*sh_arg;	/* opaque argument passed to sh_func */
	softint_t		*sh_isr;	/* this CPU's softint for the level */
	u_int			sh_pending;	/* nonzero while queued on si_q */
	u_int			sh_flags;	/* SOFTINT_* flags from softint_establish() */
} softhand_t;
    361 
typedef struct softcpu {
	struct cpu_info		*sc_cpu;	/* back pointer to the owning CPU */
	softint_t		sc_int[SOFTINT_COUNT]; /* one softint per priority level */
	softhand_t		sc_hand[1];	/* handler table; really softint_max slots */
} softcpu_t;
    367 
    368 static void	softint_thread(void *);
    369 static void	softint_netisr(void *);
    370 
    371 u_int		softint_bytes = 8192;
    372 u_int		softint_timing;
    373 static u_int	softint_max;
    374 static kmutex_t	softint_lock;
    375 static void	*softint_netisr_sih;
    376 
    377 /*
    378  * softint_init_isr:
    379  *
    380  *	Initialize a single interrupt level for a single CPU.
    381  */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	/* Create the dedicated handler LWP, bound to this CPU. */
	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%d", desc, (int)ci->ci_cpuid);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	/* Attach event counters: one for dispatches, one for blocks. */
	snprintf(si->si_name, sizeof(si->si_name), "%s/%d", desc,
	    (int)ci->ci_cpuid);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_INTR, NULL,
	   "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%d",
	    desc, (int)ci->ci_cpuid);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_INTR, NULL,
	   "softint", si->si_name_block);

	/* Let the LWP find its softint_t, then do MD setup. */
	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
#ifdef __HAVE_FAST_SOFTINTS
	si->si_lwp->l_mutex = &ci->ci_schedstate.spc_lwplock;
#endif
}
    416 /*
    417  * softint_init:
    418  *
    419  *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
    420  */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		/* Boot CPU: global one-time setup. */
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		/* Handler slots fill whatever space remains after softcpu_t. */
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	/* One zeroed, wired allocation holds this CPU's softcpu_t + slots. */
	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init_cpu: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		/*
		 * Secondary CPU: clone the boot CPU's handler table so
		 * handlers established before this CPU attached exist here
		 * too.  Don't lock -- autoconfiguration will prevent reentry.
		 */
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);

		/* Repoint each copied handler at THIS CPU's softint_t. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
	} else {
		/* Establish a handler for legacy net interrupts. */
		softint_netisr_sih = softint_establish(SOFTINT_NET,
		    softint_netisr, NULL);
		KASSERT(softint_netisr_sih != NULL);
	}
}
    470 
    471 /*
    472  * softint_establish:
    473  *
    474  *	Register a software interrupt handler.
    475  */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);

	mutex_enter(&softint_lock);

	/* Find a free slot (index 0 is reserved; tables are kept in sync). */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++)
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}

	/* Set up the handler on each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];

		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_pending = 0;
	}

	mutex_exit(&softint_lock);

	/*
	 * The cookie is the slot's byte offset into a softcpu_t, valid
	 * for every CPU since all per-CPU areas share one layout.
	 */
	return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
}
    518 
    519 /*
    520  * softint_disestablish:
    521  *
    522  *	Unregister a software interrupt handler.
    523  */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;

	/* The cookie is a byte offset into each CPU's softcpu area. */
	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);

	mutex_enter(&softint_lock);

	/* Clear the handler slot on each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		/* Caller must ensure the handler can no longer fire. */
		KASSERT(sh->sh_pending == 0);
		sh->sh_func = NULL;
	}

	mutex_exit(&softint_lock);
}
    549 
    550 /*
    551  * softint_schedule:
    552  *
    553  *	Trigger a software interrupt.  Must be called from a hardware
    554  *	interrupt handler, or with preemption disabled (since we are
    555  *	using the value of curcpu()).
    556  */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERT(offset != 0 && offset < softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/*
	 * If it's already pending there's nothing to do.  This unlocked
	 * test is only an optimization; it is repeated below at splhigh.
	 */
	if (sh->sh_pending)
		return;

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 * splhigh protects against this CPU's own interrupts.
	 */
	s = splhigh();
	if (!sh->sh_pending) {
		si = sh->sh_isr;
		sh->sh_pending = 1;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}
    590 
    591 /*
    592  * softint_execute:
    593  *
    594  *	Invoke handlers for the specified soft interrupt.
    595  *	Must be entered at splhigh.  Will drop the priority
    596  *	to the level specified, but returns back at splhigh.
    597  */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;
	bool havelock;		/* true while we hold the big kernel lock */

	KASSERT(si->si_lwp == curlwp);
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	havelock = false;

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		sh->sh_pending = 0;
		/* Drop to the caller's level while the handler runs. */
		splx(s);

		/* Run the handler; take KERNEL_LOCK for non-MPSAFE ones. */
		if ((sh->sh_flags & SOFTINT_MPSAFE) == 0 && !havelock) {
			KERNEL_LOCK(1, l);
			havelock = true;
		}
		(*sh->sh_func)(sh->sh_arg);

		/* Back to splhigh before re-examining the queue. */
		(void)splhigh();
	}

	if (havelock) {
		KERNEL_UNLOCK_ONE(l);
	}

	/*
	 * Unlocked, but only for statistics.
	 * Should be per-CPU to prevent cache ping-pong.
	 */
	uvmexp.softs++;

	si->si_evcnt.ev_count++;
	/* Allow softint_schedule() to trigger us again. */
	si->si_active = 0;
}
    651 
    652 /*
    653  * schednetisr:
    654  *
    655  *	Trigger a legacy network interrupt.  XXX Needs to go away.
    656  */
void
schednetisr(int isr)
{
	int s;

	/* Set the per-CPU bit at splhigh, then schedule the dispatcher. */
	s = splhigh();
	curcpu()->ci_data.cpu_netisrs |= (1 << isr);
	softint_schedule(softint_netisr_sih);
	splx(s);
}
    667 
    668 /*
 * softint_netisr:
    670  *
    671  *	Dispatch legacy network interrupts.  XXX Needs to go away.
    672  */
static void
softint_netisr(void *cookie)
{
	struct cpu_info *ci;
	int s, bits;

	ci = curcpu();

	/* Atomically (w.r.t. this CPU) snapshot and clear the bit mask. */
	s = splhigh();
	bits = ci->ci_data.cpu_netisrs;
	ci->ci_data.cpu_netisrs = 0;
	splx(s);

	/*
	 * netisr_dispatch.h expands DONETISR once per protocol, calling
	 * each handler whose bit was set in the snapshot.
	 */
#define	DONETISR(which, func)				\
	do {						\
		void func(void);			\
		if ((bits & (1 << which)) != 0)		\
			func();				\
	} while(0);
#include <net/netisr_dispatch.h>
#undef DONETISR
}
    695 
    696 #ifndef __HAVE_FAST_SOFTINTS
    697 
    698 /*
    699  * softint_init_md:
    700  *
    701  *	Perform machine-dependent initialization.
    702  */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	softint_t *si;

	/* Generic implementation: the MD cookie is simply the LWP. */
	*machdep = (uintptr_t)l;
	si = l->l_private;

	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
}
    718 
    719 /*
    720  * softint_trigger:
    721  *
    722  *	Cause a soft interrupt handler to begin executing.
    723  */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	l = (lwp_t *)machdep;
	ci = l->l_cpu;

	/* Make the handler LWP runnable and ask for an immediate switch. */
	spc_lock(ci);
	l->l_mutex = ci->ci_schedstate.spc_mutex;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	cpu_need_resched(ci, RESCHED_IMMED);
	spc_unlock(ci);
}
    740 
    741 /*
    742  * softint_thread:
    743  *
    744  *	Slow path MI software interrupt dispatch.
    745  */
    746 void
    747 softint_thread(void *cookie)
    748 {
    749 	softint_t *si;
    750 	lwp_t *l;
    751 	int s;
    752 
    753 	l = curlwp;
    754 	si = l->l_private;
    755 	s = splhigh();
    756 
    757 	for (;;) {
    758 		softint_execute(si, l, s);
    759 
    760 		lwp_lock(l);
    761 		l->l_stat = LSIDL;
    762 		mi_switch(l);
    763 	}
    764 }
    765 
    766 #else	/*  !__HAVE_FAST_SOFTINTS */
    767 
    768 /*
    769  * softint_thread:
    770  *
    771  *	In the __HAVE_FAST_SOFTINTS case, the LWP is switched to without
    772  *	restoring any state, so we should not arrive here - there is a
    773  *	direct handoff between the interrupt stub and softint_dispatch().
    774  */
/*
 * Declared static at the top of the file; make the definition match.
 * With fast softints the interrupt stub hands off directly to
 * softint_dispatch(), so this entry point must never run.
 */
static void
softint_thread(void *cookie)
{

	panic("softint_thread");
}
    781 
    782 /*
    783  * softint_dispatch:
    784  *
    785  *	Entry point from machine-dependent code.
    786  */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct timeval now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = (softint_timing ? LW_TIMEINTR : 0);
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;
	l->l_flag |= (LW_RUNNING | timing);

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing)
		microtime(&l->l_stime);
	softint_execute(si, l, s);
	if (timing) {
		microtime(&now);
		updatertime(l, &now);
		l->l_flag &= ~LW_TIMEINTR;
	}

	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone so switch to the idle LWP.  It will select a new LWP to
	 * run.
	 *
	 * We must drop the priority level as switching at IPL_HIGH could
	 * deadlock the system.  We have already set si->si_active = 0,
	 * which means another interrupt at this level can be triggered.
	 * That's not a problem: we are lowering to level 's' which will
	 * prevent softint_dispatch() from being reentered at level 's',
	 * until the priority is finally dropped to IPL_NONE on entry to
	 * the idle loop.
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		splx(s);
		pmap_deactivate(l);
		lwp_exit_switchaway(l);
		/* NOTREACHED */
	}
	l->l_switchto = NULL;
	l->l_flag &= ~LW_RUNNING;
}
    845 
    846 #endif	/* !__HAVE_FAST_SOFTINTS */
    847 
    848 /*
    849  * softint_block:
    850  *
    851  *	Update statistics when the soft interrupt blocks.
    852  */
void
softint_block(lwp_t *l)
{
	softint_t *si = l->l_private;

	/* Only soft interrupt LWPs may arrive here. */
	KASSERT((l->l_flag & LW_INTR) != 0);
	si->si_evcnt_block.ev_count++;
}
    861