/* Source-viewer header (non-code): intr.h revision 1.6 */
      1 /*	$NetBSD: intr.h,v 1.6 2005/11/03 13:06:07 yamt Exp $	*/
      2 /*	NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp	*/
      3 
      4 /*-
      5  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * This code is derived from software contributed to The NetBSD Foundation
      9  * by Charles M. Hannum, and by Jason R. Thorpe.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *        This product includes software developed by the NetBSD
     22  *        Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 
     41 #ifndef _XEN_INTR_H_
     42 #define	_XEN_INTR_H_
     43 
     44 #include <machine/intrdefs.h>
     45 
     46 #ifndef _LOCORE
     47 #include <machine/cpu.h>
     48 #include <machine/pic.h>
     49 
/*
 * Struct describing an event channel: bookkeeping shared by all
 * handlers attached to one Xen event channel.
 */

struct evtsource {
	int ev_maxlevel;		/* max. IPL for this source */
	u_int32_t ev_imask;		/* interrupt mask */
	struct intrhand *ev_handlers;	/* handler chain (linked via ih_evt_next) */
	struct evcnt ev_evcnt;		/* interrupt counter */
	char ev_evname[32];		/* event counter name */
};
     61 
/*
 * Structure describing an interrupt level. struct cpu_info has an array of
 * IPL_MAX of these. The index in the array is equal to the stub number of
 * the stub code as present in vector.s
 */

struct intrstub {
#if 0
	void *ist_entry;	/* direct entry point (currently unused) */
#endif
	void *ist_recurse;	/* stub entry used when recursing via spllower */
	void *ist_resume;	/* stub entry used when resuming via doreti */
};
     75 
/* Per-IPL state; struct cpu_info holds one of these per interrupt level. */
struct iplsource {
	struct intrhand *ipl_handlers;   /* handler chain */
	void *ipl_recurse;               /* entry for spllower */
	void *ipl_resume;                /* entry for doreti */
	u_int32_t ipl_evt_mask1;	/* pending events for this IPL */
	/*
	 * NOTE(review): looks like a two-level pending bitmap — a set bit
	 * in ipl_evt_mask1 presumably selects a word of ipl_evt_mask2;
	 * confirm against the event-channel dispatch code.
	 */
	u_int32_t ipl_evt_mask2[NR_EVENT_CHANNELS];
};
     83 
     84 
     85 
/*
 * Interrupt handler chains. These are linked in both the evtsource and
 * the iplsource.
 * The handler is called with its (single) argument.
 */

struct intrhand {
	int	(*ih_fun)(void *);	/* handler function */
	void	*ih_arg;		/* argument passed to ih_fun */
	int	ih_level;		/* IPL this handler was established at */
	struct	intrhand *ih_ipl_next;	/* next handler on the iplsource chain */
	struct	intrhand *ih_evt_next;	/* next handler on the evtsource chain */
	struct cpu_info *ih_cpu;	/* CPU this handler belongs to */
};
    100 
    101 
/* Recurse/resume stubs for event-channel interrupts (see struct intrstub). */
extern struct intrstub xenev_stubs[];

/* Sources NOT masked at IPL 'level' on CPU 'ci' (used by spllower below). */
#define IUNMASK(ci,level) (ci)->ci_iunmask[(level)]

/* Assembly helper: process interrupts left pending below the new level. */
extern void Xspllower(int);

static __inline int splraise(int);
static __inline void spllower(int);
static __inline void softintr(int);
    111 
    112 /*
    113  * Add a mask to cpl, and return the old value of cpl.
    114  */
    115 static __inline int
    116 splraise(int nlevel)
    117 {
    118 	int olevel;
    119 	struct cpu_info *ci = curcpu();
    120 
    121 	olevel = ci->ci_ilevel;
    122 	if (nlevel > olevel)
    123 		ci->ci_ilevel = nlevel;
    124 	__insn_barrier();
    125 	return (olevel);
    126 }
    127 
/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
static __inline void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	__insn_barrier();	/* protected accesses must not sink below the lower */

	imask = IUNMASK(ci, nlevel);	/* sources that become unmasked at nlevel */
	psl = read_psl();		/* remember current interrupt-enable state */
	disable_intr();			/* check ci_ipending without races */
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		write_psl(psl);		/* restore the saved interrupt state */
	}
}
    152 
/* Assert (debug kernels) that the current IPL is strictly below x. */
#define SPL_ASSERT_BELOW(x) KDASSERT(curcpu()->ci_ilevel < (x))

/*
 * Software interrupt masks
 *
 * NOTE: spllowersoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define	spllowersoftclock() spllower(IPL_SOFTCLOCK)

#define splsoftxenevt()	splraise(IPL_SOFTXENEVT)

/*
 * Miscellaneous
 */
#define	spl0()		spllower(IPL_NONE)	/* drop to the lowest level */
#define splraiseipl(x) 	splraise(x)
#define	splx(x)		spllower(x)	/* restore a level saved by splraise() */

#include <sys/spl.h>
    173 
    174 /*
    175  * Software interrupt registration
    176  *
    177  * We hand-code this to ensure that it's atomic.
    178  *
    179  * XXX always scheduled on the current CPU.
    180  */
    181 static __inline void
    182 softintr(int sir)
    183 {
    184 	struct cpu_info *ci = curcpu();
    185 
    186 	__asm __volatile("lock ; orl %1, %0" :
    187 	    "=m"(ci->ci_ipending) : "ir" (1 << sir));
    188 }
    189 
/*
 * XXX compatibility wrapper: schedule a network software interrupt.
 */
#define	setsoftnet()	softintr(SIR_NET)

/*
 * Stub declarations.
 */

/* Soft interrupt entry stubs — presumably defined in vector.s; verify. */
extern void Xsoftclock(void);
extern void Xsoftnet(void);
extern void Xsoftserial(void);
extern void Xsoftxenevt(void);

struct cpu_info;

extern char idt_allocmap[];	/* allocation bitmap for IDT slots */

struct pcibus_attach_args;

void intr_default_setup(void);
int x86_nmi(void);
void intr_calculatemasks(struct evtsource *);
void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *);
void intr_disestablish(struct intrhand *);
const char *intr_string(int);
void cpu_intr_init(struct cpu_info *);
#ifdef INTRDEBUG
void intr_printconfig(void);
#endif
    220 
    221 #endif /* !_LOCORE */
    222 
    223 /*
    224  * Generic software interrupt support.
    225  */
    226 
/* Generic software interrupt numbers (indexes, 0..X86_NSOFTINTR-1). */
#define	X86_SOFTINTR_SOFTCLOCK		0
#define	X86_SOFTINTR_SOFTNET		1
#define	X86_SOFTINTR_SOFTSERIAL		2
#define	X86_NSOFTINTR			3
    231 
    232 #ifndef _LOCORE
    233 #include <sys/queue.h>
    234 
/* One registered soft interrupt handler. */
struct x86_soft_intrhand {
	TAILQ_ENTRY(x86_soft_intrhand)
		sih_q;				/* linkage on softintr_q */
	struct x86_soft_intr *sih_intrhead;	/* back pointer to owner */
	void	(*sih_fn)(void *);		/* handler function */
	void	*sih_arg;			/* argument passed to sih_fn */
	int	sih_pending;			/* nonzero while queued */
};
    243 
/* Per-soft-interrupt head: queue of handlers pending dispatch. */
struct x86_soft_intr {
	TAILQ_HEAD(, x86_soft_intrhand)
		softintr_q;			/* pending handlers */
	int softintr_ssir;			/* soft interrupt number for softintr() */
	struct simplelock softintr_slock;	/* protects softintr_q */
};
    250 
/*
 * Raise to splhigh() and take the soft-interrupt queue lock.
 * 's' receives the previous IPL for x86_softintr_unlock().
 * (Fix: parenthesize the macro argument 'si' in the expansion so an
 * expression argument such as 'cond ? a : b' binds correctly.)
 */
#define	x86_softintr_lock(si, s)					\
do {									\
	(s) = splhigh();						\
	simple_lock(&(si)->softintr_slock);				\
} while (/*CONSTCOND*/ 0)
    256 
/*
 * Release the soft-interrupt queue lock and restore the IPL saved
 * by x86_softintr_lock().
 * (Fix: parenthesize the macro argument 'si' in the expansion so an
 * expression argument binds correctly.)
 */
#define	x86_softintr_unlock(si, s)					\
do {									\
	simple_unlock(&(si)->softintr_slock);				\
	splx((s));							\
} while (/*CONSTCOND*/ 0)
    262 
void	*softintr_establish(int, void (*)(void *), void *);
void	softintr_disestablish(void *);
void	softintr_init(void);
void	softintr_dispatch(int);

/*
 * Schedule a soft interrupt handler for dispatch: under the queue
 * lock, enqueue it (if not already pending) and raise its soft
 * interrupt on the current CPU via softintr().
 * NOTE(review): the '__sih'/'__si' locals use double-underscore names,
 * which are reserved for the implementation.
 */
#define	softintr_schedule(arg)						\
do {									\
	struct x86_soft_intrhand *__sih = (arg);			\
	struct x86_soft_intr *__si = __sih->sih_intrhead;		\
	int __s;							\
									\
	x86_softintr_lock(__si, __s);					\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	x86_softintr_unlock(__si, __s);					\
} while (/*CONSTCOND*/ 0)
    282 #endif /* _LOCORE */
    283 
    284 #endif /* _XEN_INTR_H_ */
    285