/*	$NetBSD: intr.h,v 1.14 2006/12/26 15:22:44 ad Exp $	*/
/*	NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _XEN_INTR_H_
#define	_XEN_INTR_H_

#include <machine/intrdefs.h>

#ifndef _LOCORE
#include <machine/cpu.h>
#include <machine/pic.h>

#include "opt_xen.h"

/*
 * Struct describing an event channel.
 */

struct evtsource {
	int ev_maxlevel;		/* max. IPL for this source */
	u_int32_t ev_imask;		/* interrupt mask */
	struct intrhand *ev_handlers;	/* handler chain */
	struct evcnt ev_evcnt;		/* interrupt counter */
	char ev_evname[32];		/* event counter name */
};

/*
 * Structure describing an interrupt level.  struct cpu_info has an array
 * of IPL_MAX of these.  The index in the array is equal to the number of
 * the interrupt stub as present in vector.s.
 */

struct intrstub {
#if 0
	void *ist_entry;
#endif
	void *ist_recurse;
	void *ist_resume;
};

#ifdef XEN3
/* for x86 compatibility */
extern struct intrstub i8259_stubs[];
extern struct intrstub ioapic_edge_stubs[];
extern struct intrstub ioapic_level_stubs[];
#endif

struct iplsource {
	struct intrhand *ipl_handlers;   /* handler chain */
	void *ipl_recurse;               /* entry for spllower */
	void *ipl_resume;                /* entry for doreti */
	u_int32_t ipl_evt_mask1;	/* pending events for this IPL */
	u_int32_t ipl_evt_mask2[NR_EVENT_CHANNELS];
};

/*
 * Interrupt handler chains.  These are linked in both the evtsource and
 * the iplsource.  The handler is called with its (single) argument.
 */

struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	int	ih_level;
	struct	intrhand *ih_ipl_next;
	struct	intrhand *ih_evt_next;
	struct cpu_info *ih_cpu;
};
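
/*
 * Illustrative sketch, not part of the original header: the shape of a
 * handler hung off one of these chains.  It is called with its single
 * ih_arg and returns nonzero if it claimed the interrupt.  The names
 * xyz_intr, xyz_softc, xyz_pending and xyz_service are hypothetical.
 *
 *	static int
 *	xyz_intr(void *arg)
 *	{
 *		struct xyz_softc *sc = arg;
 *
 *		if (!xyz_pending(sc))
 *			return 0;
 *		xyz_service(sc);
 *		return 1;
 *	}
 */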

struct xen_intr_handle {
	int pirq; /* also contains the APIC_INT_* flags if NIOAPIC > 0 */
	int evtch;
};

extern struct intrstub xenev_stubs[];

#define IUNMASK(ci,level) (ci)->ci_iunmask[(level)]

extern void Xspllower(int);

static __inline int splraise(int);
static __inline void spllower(int);
static __inline void softintr(int);

/*
 * Raise the interrupt priority level (cpl) to nlevel if that is higher
 * than the current level, and return the old value of cpl.
 */
static __inline int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__insn_barrier();
	return (olevel);
}
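
/*
 * Typical usage (an illustrative sketch, not part of the original
 * header): raise the IPL around a critical section, then restore the
 * saved level.
 *
 *	int s;
 *
 *	s = splraise(IPL_HIGH);
 *	(code protected from interrupts at or below IPL_HIGH)
 *	splx(s);
 *
 * splx() is defined below as spllower(), so restoring the saved level
 * also runs any interrupts that became pending while they were masked.
 */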

/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
static __inline void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	__insn_barrier();

	imask = IUNMASK(ci, nlevel);
	psl = read_psl();
	disable_intr();
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		write_psl(psl);
	}
}

#define SPL_ASSERT_BELOW(x) KDASSERT(curcpu()->ci_ilevel < (x))

/*
 * Software interrupt masks
 *
 * NOTE: spllowersoftclock() is used by hardclock() to lower the priority
 * from clock to softclock before it calls softclock().
 */
#define	spllowersoftclock() spllower(IPL_SOFTCLOCK)

#define splsoftxenevt()	splraise(IPL_SOFTXENEVT)

/*
 * Miscellaneous
 */
#define	spl0()		spllower(IPL_NONE)
#define	splx(x)		spllower(x)

typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static inline int
splraiseipl(ipl_cookie_t icookie)
{

	return splraise(icookie._ipl);
}
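
/*
 * Illustrative sketch, not part of the original header: machine
 * independent code converts an IPL to a cookie once, then raises to it
 * on hot paths via splraiseipl().  The name sc_icookie is hypothetical.
 *
 *	sc->sc_icookie = makeiplcookie(IPL_NET);
 *
 *	int s = splraiseipl(sc->sc_icookie);
 *	(touch state shared with the interrupt handler)
 *	splx(s);
 */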

#include <sys/spl.h>

/*
 * Software interrupt registration
 *
 * We hand-code this to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
static __inline void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	__asm volatile("lock ; orl %1, %0" :
	    "=m"(ci->ci_ipending) : "ir" (1 << sir));
}

/*
 * XXX
 */
#define	setsoftnet()	softintr(SIR_NET)

/*
 * Stub declarations.
 */

extern void Xsoftclock(void);
extern void Xsoftnet(void);
extern void Xsoftserial(void);
extern void Xsoftxenevt(void);

struct cpu_info;

extern char idt_allocmap[];

struct pcibus_attach_args;

void intr_default_setup(void);
int x86_nmi(void);
void intr_calculatemasks(struct evtsource *);

void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *);
void intr_disestablish(struct intrhand *);
const char *intr_string(int);
void cpu_intr_init(struct cpu_info *);
int xen_intr_map(int *, int);
#ifdef INTRDEBUG
void intr_printconfig(void);
#endif
int intr_find_mpmapping(int, int, struct xen_intr_handle *);
struct pic *intr_findpic(int);
void intr_add_pcibus(struct pcibus_attach_args *);
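
/*
 * Illustrative sketch, not part of the original header: establishing a
 * hardware interrupt handler.  The argument order is assumed to match
 * the x86 intr_establish() (legacy IRQ, PIC, pin, trigger type, IPL,
 * handler, argument); xyz_intr and sc are hypothetical.
 *
 *	void *ih;
 *
 *	ih = intr_establish(irq, pic, pin, IST_LEVEL, IPL_NET,
 *	    xyz_intr, sc);
 *	if (ih == NULL)
 *		(fail the attach)
 */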

#endif /* !_LOCORE */

/*
 * Generic software interrupt support.
 */

#define	X86_SOFTINTR_SOFTCLOCK		0
#define	X86_SOFTINTR_SOFTNET		1
#define	X86_SOFTINTR_SOFTSERIAL		2
#define	X86_NSOFTINTR			3

#ifndef _LOCORE
#include <sys/queue.h>

struct x86_soft_intrhand {
	TAILQ_ENTRY(x86_soft_intrhand)
		sih_q;
	struct x86_soft_intr *sih_intrhead;
	void	(*sih_fn)(void *);
	void	*sih_arg;
	int	sih_pending;
};

struct x86_soft_intr {
	TAILQ_HEAD(, x86_soft_intrhand)
		softintr_q;
	int softintr_ssir;
	struct simplelock softintr_slock;
};

#define	x86_softintr_lock(si, s)					\
do {									\
	(s) = splhigh();						\
	simple_lock(&si->softintr_slock);				\
} while (/*CONSTCOND*/ 0)

#define	x86_softintr_unlock(si, s)					\
do {									\
	simple_unlock(&si->softintr_slock);				\
	splx((s));							\
} while (/*CONSTCOND*/ 0)

void	*softintr_establish(int, void (*)(void *), void *);
void	softintr_disestablish(void *);
void	softintr_init(void);
void	softintr_dispatch(int);

#define	softintr_schedule(arg)						\
do {									\
	struct x86_soft_intrhand *__sih = (arg);			\
	struct x86_soft_intr *__si = __sih->sih_intrhead;		\
	int __s;							\
									\
	x86_softintr_lock(__si, __s);					\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	x86_softintr_unlock(__si, __s);					\
} while (/*CONSTCOND*/ 0)
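
/*
 * Illustrative sketch, not part of the original header: register a soft
 * interrupt handler once at attach time, then schedule it from hard
 * interrupt context.  The level argument is assumed to be an IPL_SOFT*
 * value, as on contemporary x86; xyz_softintr and sc are hypothetical.
 *
 *	void *sih;
 *
 *	sih = softintr_establish(IPL_SOFTNET, xyz_softintr, sc);
 *
 *	softintr_schedule(sih);		(from the hard handler)
 *
 * Scheduling is idempotent: sih_pending guarantees the handler is
 * queued at most once until softintr_dispatch() runs it.
 */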
#endif /* !_LOCORE */

#endif /* _XEN_INTR_H_ */