/*	$NetBSD: kern_softint.c,v 1.1.2.1 2007/06/17 21:31:27 ad Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Soft interrupt implementation.
 *
 * Soft interrupts are scheduled from hardware interrupt context (or
 * with preemption otherwise disabled) and run their handlers slightly
 * later, at one of four levels: clock, bio, net and serial.  Each
 * level on each CPU is serviced by a dedicated LWP.  Without
 * __HAVE_FAST_SOFTINTS that LWP is an ordinary kernel thread dispatched
 * through the scheduler; with it, machine-dependent code hands off to
 * the LWP directly from the interrupt stub.
 *
 * The !__HAVE_FAST_SOFTINTS case assumes splhigh == splsched.
 */
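
/*
 * Example usage (an illustrative sketch only, not part of this file;
 * all "mydev" names are hypothetical): a driver establishes a handler
 * once at attach time, defers work to it from its hardware interrupt
 * handler, and tears it down at detach time.  Unless SOFTINT_MPSAFE is
 * passed to softint_establish(), the handler runs holding the kernel
 * lock (see softint_execute() below).
 *
 *	static void *mydev_sih;		(handle from softint_establish)
 *	static void *mydev_sc;		(hypothetical device state)
 *
 *	static void
 *	mydev_softintr(void *arg)
 *	{
 *		... bulk of the interrupt processing runs here ...
 *	}
 *
 *	void
 *	mydev_attach(void)
 *	{
 *		mydev_sih = softint_establish(SOFTINT_NET,
 *		    mydev_softintr, mydev_sc);
 *		if (mydev_sih == NULL)
 *			panic("mydev_attach: softint_establish failed");
 *	}
 *
 *	int
 *	mydev_hardintr(void *arg)
 *	{
 *		... acknowledge the device, capture state ...
 *		softint_schedule(mydev_sih);	(defer the work)
 *		return 1;
 *	}
 *
 *	void
 *	mydev_detach(void)
 *	{
 *		softint_disestablish(mydev_sih);
 *	}
 */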

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.1.2.1 2007/06/17 21:31:27 ad Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/kthread.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>

#include <net/netisr.h>

#include <uvm/uvm_extern.h>

#define	PRI_SOFTCLOCK	PRI_INTERRUPT
#define	PRI_SOFTBIO	(PRI_INTERRUPT + 4)
#define	PRI_SOFTNET	(PRI_INTERRUPT + 8)
#define	PRI_SOFTSERIAL	(PRI_INTERRUPT + 12)

/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	TAILQ_HEAD(, softhand)	si_q;		/* queue of pending handlers */
	struct lwp		*si_lwp;	/* LWP dispatching the handlers */
	struct cpu_info		*si_cpu;	/* CPU this level belongs to */
	uintptr_t		si_machdep;	/* MD cookie for softint_trigger() */
	struct evcnt		si_evcnt;	/* dispatch event counter */
	int			si_active;	/* non-zero while dispatch active */
} softint_t;

typedef struct softhand {
	TAILQ_ENTRY(softhand)	sh_q;		/* pending-queue linkage */
	void			(*sh_func)(void *);	/* handler function */
	void			*sh_arg;	/* argument passed to sh_func */
	softint_t		*sh_isr;	/* back pointer to our level */
	u_int			sh_pending;	/* non-zero while on si_q */
	u_int			sh_flags;	/* SOFTINT_* flags */
} softhand_t;

typedef struct softcpu {
	struct cpu_info		*sc_cpu;	/* back pointer to the CPU */
	softint_t		sc_int[SOFTINT_COUNT];	/* one per level */
	softhand_t		sc_hand[1];	/* handler table, softint_max slots */
} softcpu_t;

static void	softint_thread(void *);
static void	softint_netisr(void *);

u_int		softint_bytes = 8192;	/* tunable: per-CPU table size */
static u_int	softint_max;		/* handler slots per CPU */
static kmutex_t	softint_lock;		/* serializes handler table updates */
static void	*softint_netisr_sih;	/* handle for legacy net interrupts */

/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	TAILQ_INIT(&si->si_q);

	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%d", desc, (int)ci->ci_cpuid);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_INTR, NULL,
	    "cpu", si->si_lwp->l_name);

	softint_init_md(si->si_lwp, level, &si->si_machdep, si);
}

/*
 * softint_init_md:
 *
 *	Perform machine-dependent initialization.  Arguments:
 *
 *	l
 *
 *	    LWP to handle the interrupt
 *
 *	level
 *
 *	    Symbolic level: SOFTINT_*
 *
 *	machdep
 *
 *	    Private value for machine dependent code,
 *	    passed by MI code to softint_trigger().
 *
 *	cookie
 *
 *	    Value to be passed to softint_execute() by
 *	    MD code when an interrupt is being handled.
 */
#ifndef __HAVE_FAST_SOFTINTS
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep, void *cookie)
{

	*machdep = (uintptr_t)l;

	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	((softint_t *)cookie)->si_active = 1;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	lwp_unlock(l);
}
#endif	/* !__HAVE_FAST_SOFTINTS */

/*
 * softint_init:
 *
 *	Initialize per-CPU data structures.  Called from mi_cpu_attach().
 */
void
softint_init(struct cpu_info *ci)
{
	static struct cpu_info *first;
	softcpu_t *sc, *scfirst;
	softhand_t *sh, *shmax;

	if (first == NULL) {
		first = ci;
		mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
		softint_bytes = round_page(softint_bytes);
		softint_max = (softint_bytes - sizeof(softcpu_t)) /
		    sizeof(softhand_t);
	}

	sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (sc == NULL)
		panic("softint_init: cannot allocate memory");

	ci->ci_data.cpu_softcpu = sc;
	sc->sc_cpu = ci;

	softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
	softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
	softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
	softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);

	if (first != ci) {
		/* No need to lock - the system is still cold. */
		scfirst = first->ci_data.cpu_softcpu;
		sh = sc->sc_hand;
		memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);

		/* Update pointers for this CPU. */
		for (shmax = sh + softint_max; sh < shmax; sh++) {
			if (sh->sh_func == NULL)
				continue;
			sh->sh_isr =
			    &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
		}
	} else {
		/* Establish a handler for legacy net interrupts. */
		softint_netisr_sih = softint_establish(SOFTINT_NET,
		    softint_netisr, NULL);
		KASSERT(softint_netisr_sih != NULL);
	}
}

/*
 * softint_establish:
 *
 *	Register a software interrupt handler.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level;
	u_int index;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);

	mutex_enter(&softint_lock);

	/* Find a free slot. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++)
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}

	/* Set up the handler on each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];

		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_pending = 0;
	}

	mutex_exit(&softint_lock);

	/* The table index doubles as the opaque handle. */
	return (void *)index;
}

/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int index;

	index = (u_int)arg;
	KASSERT(index != 0 && index < softint_max);

	mutex_enter(&softint_lock);

	/* Clear the handler on each CPU. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		KASSERT(sh->sh_func != NULL);
		KASSERT(sh->sh_pending == 0);
		sh->sh_func = NULL;
	}

	mutex_exit(&softint_lock);
}
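
/*
 * Usage note: as the KASSERT on sh_pending above implies, the caller
 * must ensure that the handler can no longer be scheduled on any CPU,
 * and that no schedule is still pending, before disestablishing it.
 * A sketch of a safe teardown order (mydev_sih as in the hypothetical
 * example near the top of the file):
 *
 *	... disable the source, e.g. mask the device interrupt ...
 *	... wait for any pending soft interrupt to complete ...
 *	softint_disestablish(mydev_sih);
 */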

/*
 * softint_trigger:
 *
 *	Cause a soft interrupt handler to begin executing.
 */
#ifndef __HAVE_FAST_SOFTINTS
inline void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	l = (lwp_t *)machdep;
	ci = l->l_cpu;

	spc_lock(ci);
	l->l_mutex = ci->ci_schedstate.spc_mutex;
	l->l_stat = LSRUN;
	sched_enqueue(l, false);
	cpu_need_resched(ci, 1);
	spc_unlock(ci);
}
#endif	/* !__HAVE_FAST_SOFTINTS */

/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	u_int index;
	int s;

	/* Find the handler record for this CPU. */
	index = (u_int)arg;
	KASSERT(index != 0 && index < softint_max);
	sh = &((softcpu_t *)curcpu()->ci_data.cpu_softcpu)->sc_hand[index];

	/* If it's already pending, there's nothing to do. */
	if (sh->sh_pending)
		return;

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 */
	s = splhigh();
	if (!sh->sh_pending) {
		si = sh->sh_isr;
		sh->sh_pending = 1;
		TAILQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}
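
/*
 * Usage note: schedules coalesce.  If a handler is scheduled several
 * times before it gets to run, the unlocked sh_pending test above makes
 * the extra calls cheap and the handler runs only once, e.g.:
 *
 *	softint_schedule(mydev_sih);
 *	softint_schedule(mydev_sih);	(returns early: already pending)
 *	... mydev_softintr() runs once ...
 *
 * (mydev_* names as in the hypothetical example near the top of the
 * file.)
 */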

/*
 * softint_thread:
 *
 *	MI software interrupt dispatch.  In the __HAVE_FAST_SOFTINTS
 *	case, the LWP is switched to without restoring any state, so
 *	we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_execute().
 */
static void
softint_thread(void *cookie)
{
#ifdef __HAVE_FAST_SOFTINTS
	panic("softint_thread");
#else	/* __HAVE_FAST_SOFTINTS */
	lwp_t *l;
	int s;

	l = curlwp;
	s = splhigh();

	for (;;) {
		softint_execute(cookie, s);

		lwp_lock(l);
		l->l_stat = LSIDL;
		mi_switch(l);
	}
#endif	/* !__HAVE_FAST_SOFTINTS */
}

/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified, but returns back at splhigh.
 */
void
softint_execute(void *cookie, int s)
{
	softint_t *si;
	softhand_t *sh;

	si = cookie;

	KASSERT(si->si_lwp == curlwp);
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(!TAILQ_EMPTY(&si->si_q));
	KASSERT(si->si_active);

	while (!TAILQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = TAILQ_FIRST(&si->si_q);
		TAILQ_REMOVE(&si->si_q, sh, sh_q);
		sh->sh_pending = 0;
		splx(s);

		/* Run the handler. */
		if ((sh->sh_flags & SOFTINT_MPSAFE) == 0) {
			KERNEL_LOCK(1, si->si_lwp);
		}
		(*sh->sh_func)(sh->sh_arg);
		if ((sh->sh_flags & SOFTINT_MPSAFE) == 0) {
			KERNEL_UNLOCK_ONE(si->si_lwp);
		}

		(void)splhigh();
	}

	/*
	 * Unlocked, but only for statistics.
	 * Should be per-CPU to prevent cache ping-pong.
	 */
	uvmexp.softs++;

	si->si_evcnt.ev_count++;
	si->si_active = 0;
}
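
/*
 * For reference, the MD calling sequence expected here mirrors what
 * softint_thread() does above (a sketch, not a definitive port
 * implementation):
 *
 *	s = splhigh();
 *	softint_execute(cookie, s);	(returns at splhigh)
 *	splx(s);
 */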

/*
 * schednetisr:
 *
 *	Trigger a legacy network interrupt.  XXX Needs to go away.
 */
void
schednetisr(int isr)
{
	int s;

	s = splhigh();
	curcpu()->ci_data.cpu_netisrs |= (1 << isr);
	softint_schedule(softint_netisr_sih);
	splx(s);
}

/*
 * softint_netisr:
 *
 *	Dispatch legacy network interrupts.  XXX Needs to go away.
 */
static void
softint_netisr(void *cookie)
{
	struct cpu_info *ci;
	int s, bits;

	ci = curcpu();

	s = splhigh();
	bits = ci->ci_data.cpu_netisrs;
	ci->ci_data.cpu_netisrs = 0;
	splx(s);

#define	DONETISR(which, func)				\
	do {						\
		void func(void);			\
		if ((bits & (1 << which)) != 0)		\
			func();				\
	} while (0);
#include <net/netisr_dispatch.h>
#undef DONETISR
}
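
/*
 * For each protocol configured into the kernel, <net/netisr_dispatch.h>
 * supplies one DONETISR() invocation, so the include above expands to a
 * run of guarded calls.  Illustrative expansion for one representative
 * entry (NETISR_IP/ipintr; the actual set depends on kernel options):
 *
 *	do {
 *		void ipintr(void);
 *		if ((bits & (1 << NETISR_IP)) != 0)
 *			ipintr();
 *	} while (0);
 */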
    494