/*	$NetBSD: intr.h,v 1.3 2003/06/16 20:01:06 thorpej Exp $	*/
2 1.1 fvdl
3 1.1 fvdl /*-
4 1.1 fvdl * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 1.1 fvdl * All rights reserved.
6 1.1 fvdl *
7 1.1 fvdl * This code is derived from software contributed to The NetBSD Foundation
8 1.1 fvdl * by Charles M. Hannum, and by Jason R. Thorpe.
9 1.1 fvdl *
10 1.1 fvdl * Redistribution and use in source and binary forms, with or without
11 1.1 fvdl * modification, are permitted provided that the following conditions
12 1.1 fvdl * are met:
13 1.1 fvdl * 1. Redistributions of source code must retain the above copyright
14 1.1 fvdl * notice, this list of conditions and the following disclaimer.
15 1.1 fvdl * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 fvdl * notice, this list of conditions and the following disclaimer in the
17 1.1 fvdl * documentation and/or other materials provided with the distribution.
18 1.1 fvdl * 3. All advertising materials mentioning features or use of this software
19 1.1 fvdl * must display the following acknowledgement:
20 1.1 fvdl * This product includes software developed by the NetBSD
21 1.1 fvdl * Foundation, Inc. and its contributors.
22 1.1 fvdl * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1 fvdl * contributors may be used to endorse or promote products derived
24 1.1 fvdl * from this software without specific prior written permission.
25 1.1 fvdl *
26 1.1 fvdl * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1 fvdl * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 fvdl * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 fvdl * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1 fvdl * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 fvdl * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 fvdl * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 fvdl * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 fvdl * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 fvdl * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 fvdl * POSSIBILITY OF SUCH DAMAGE.
37 1.1 fvdl */
38 1.1 fvdl
39 1.1 fvdl #ifndef _X86_INTR_H_
40 1.1 fvdl #define _X86_INTR_H_
41 1.1 fvdl
42 1.1 fvdl #include <machine/intrdefs.h>
43 1.1 fvdl
44 1.1 fvdl #ifndef _LOCORE
45 1.1 fvdl #include <machine/cpu.h>
46 1.1 fvdl #include <machine/pic.h>
47 1.1 fvdl
48 1.1 fvdl /*
49 1.1 fvdl * Struct describing an interrupt source for a CPU. struct cpu_info
50 1.1 fvdl * has an array of MAX_INTR_SOURCES of these. The index in the array
51 1.1 fvdl * is equal to the stub number of the stubcode as present in vector.s
52 1.1 fvdl *
53 1.1 fvdl * The primary CPU's array of interrupt sources has its first 16
54 1.1 fvdl * entries reserved for legacy ISA irq handlers. This means that
55 1.1 fvdl * they have a 1:1 mapping for arrayindex:irq_num. This is not
56 1.1 fvdl * true for interrupts that come in through IO APICs, to find
57 1.1 fvdl * their source, go through ci->ci_isources[index].is_pic
58 1.1 fvdl *
59 1.1 fvdl * It's possible to always maintain a 1:1 mapping, but that means
60 1.1 fvdl * limiting the total number of interrupt sources to MAX_INTR_SOURCES
61 1.1 fvdl * (32), instead of 32 per CPU. It also would mean that having multiple
62 1.1 fvdl * IO APICs which deliver interrupts from an equal pin number would
63 1.1 fvdl * overlap if they were to be sent to the same CPU.
64 1.1 fvdl */
65 1.1 fvdl
/*
 * Entry points into the low-level interrupt stub code for one
 * interrupt source (one entry per stub in vector.s; see the
 * block comment above).
 */
struct intrstub {
	void *ist_entry;	/* stub entered via the IDT vector */
	void *ist_recurse;	/* re-entry point used by spllower */
	void *ist_resume;	/* re-entry point used by doreti */
};
71 1.1 fvdl
/*
 * Per-CPU state for one established interrupt source; cpu_info
 * holds an array of MAX_INTR_SOURCES of these (see comment above).
 */
struct intrsource {
	int is_maxlevel;		/* max. IPL for this source */
	int is_pin;			/* IRQ for legacy; pin for IO APIC */
	struct intrhand *is_handlers;	/* handler chain */
	struct pic *is_pic;		/* originating PIC */
	void *is_recurse;		/* entry for spllower */
	void *is_resume;		/* entry for doreti */
	struct evcnt is_evcnt;		/* interrupt counter */
	char is_evname[32];		/* event counter name */
	int is_flags;			/* see below */
	int is_type;			/* level, edge */
	int is_idtvec;			/* IDT vector used by this source */
	int is_minlevel;		/* NOTE(review): presumably the lowest
					 * IPL among the chained handlers --
					 * confirm against intr_calculatemasks() */
};
86 1.1 fvdl
/* is_flags values */
#define IS_LEGACY	0x0001	/* legacy ISA irq source */
#define IS_IPI		0x0002	/* interprocessor interrupt source */
#define IS_LOG		0x0004	/* NOTE(review): meaning not evident here --
				 * verify against the interrupt code */
90 1.1 fvdl
91 1.1 fvdl
92 1.1 fvdl /*
93 1.1 fvdl * Interrupt handler chains. *_intr_establish() insert a handler into
94 1.1 fvdl * the list. The handler is called with its (single) argument.
95 1.1 fvdl */
96 1.1 fvdl
/* One established handler on an interrupt source's chain. */
struct intrhand {
	int (*ih_fun)(void *);		/* handler function */
	void *ih_arg;			/* its (single) argument */
	int ih_level;			/* IPL the handler was established at */
	struct intrhand *ih_next;	/* next handler in the chain */
	int ih_pin;			/* irq/pin, as in is_pin */
	int ih_slot;			/* index into ci_isources[] on ih_cpu */
	struct cpu_info *ih_cpu;	/* CPU this handler is established on */
};
106 1.1 fvdl
/*
 * Per-CPU tables of pending-interrupt bits masked (IMASK) and
 * unmasked (IUNMASK) at a given IPL; IUNMASK is used by spllower()
 * to find pending interrupts that become runnable.
 */
#define IMASK(ci,level) (ci)->ci_imask[(level)]
#define IUNMASK(ci,level) (ci)->ci_iunmask[(level)]
109 1.1 fvdl
/*
 * ANSI prototypes: the rest of this header (Xsoftclock() etc. below)
 * already uses plain prototypes, so drop the obsolete __P() wrappers
 * here for consistency.
 */
extern void Xspllower(int);

static __inline int splraise(int);
static __inline void spllower(int);
static __inline void softintr(int);
115 1.1 fvdl
116 1.1 fvdl /*
117 1.1 fvdl * Convert spl level to local APIC level
118 1.1 fvdl */
119 1.1 fvdl #define APIC_LEVEL(l) ((l) << 4)
120 1.1 fvdl
121 1.1 fvdl /*
122 1.1 fvdl * compiler barrier: prevent reordering of instructions.
123 1.1 fvdl * XXX something similar will move to <sys/cdefs.h>
124 1.1 fvdl * or thereabouts.
125 1.1 fvdl * This prevents the compiler from reordering code around
126 1.1 fvdl * this "instruction", acting as a sequence point for code generation.
127 1.1 fvdl */
128 1.1 fvdl
129 1.1 fvdl #define __splbarrier() __asm __volatile("":::"memory")
130 1.1 fvdl
131 1.1 fvdl /*
132 1.1 fvdl * Add a mask to cpl, and return the old value of cpl.
133 1.1 fvdl */
134 1.1 fvdl static __inline int
135 1.1 fvdl splraise(int nlevel)
136 1.1 fvdl {
137 1.1 fvdl int olevel;
138 1.1 fvdl struct cpu_info *ci = curcpu();
139 1.1 fvdl
140 1.1 fvdl olevel = ci->ci_ilevel;
141 1.1 fvdl if (nlevel > olevel)
142 1.1 fvdl ci->ci_ilevel = nlevel;
143 1.1 fvdl __splbarrier();
144 1.1 fvdl return (olevel);
145 1.1 fvdl }
146 1.1 fvdl
147 1.1 fvdl /*
148 1.1 fvdl * Restore a value to cpl (unmasking interrupts). If any unmasked
149 1.1 fvdl * interrupts are pending, call Xspllower() to process them.
150 1.1 fvdl */
/*
 * Restore a value to cpl (unmasking interrupts). If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
static __inline void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();

	__splbarrier();
	/*
	 * Since this should only lower the interrupt level, ANDing
	 * ci_ipending with IUNMASK(ci,nlevel) below selects exactly
	 * the pending interrupts that become unmasked at nlevel.
	 * (The old comment said "XOR", but the code uses "&".)
	 */
	if (ci->ci_ipending & IUNMASK(ci,nlevel))
		Xspllower(nlevel);
	else
		ci->ci_ilevel = nlevel;
}
167 1.1 fvdl
168 1.1 fvdl /*
169 1.1 fvdl * Hardware interrupt masks
170 1.1 fvdl */
/*
 * Hardware interrupt masks
 */
#define	splbio()	splraise(IPL_BIO)
#define	splnet()	splraise(IPL_NET)
#define	spltty()	splraise(IPL_TTY)
#define	splaudio()	splraise(IPL_AUDIO)
#define	splclock()	splraise(IPL_CLOCK)
#define	splstatclock()	splclock()
#define	splserial()	splraise(IPL_SERIAL)
#define	splipi()	splraise(IPL_IPI)

#define	spllpt()	spltty()

/* Assert (DIAGNOSTIC kernels) that the current IPL is below x. */
#define	SPL_ASSERT_BELOW(x) KDASSERT(curcpu()->ci_ilevel < (x))
184 1.1 fvdl
/*
 * Software interrupt masks
 *
 * NOTE: splsoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define	spllowersoftclock() spllower(IPL_SOFTCLOCK)

#define	splsoftclock()	splraise(IPL_SOFTCLOCK)
#define	splsoftnet()	splraise(IPL_SOFTNET)
#define	splsoftserial()	splraise(IPL_SOFTSERIAL)

/*
 * Miscellaneous
 */
#define	splvm()		splraise(IPL_VM)
#define	splhigh()	splraise(IPL_HIGH)
#define	spl0()		spllower(IPL_NONE)
#define	splsched()	splraise(IPL_SCHED)
#define	spllock()	splhigh()	/* lock acquisition blocks everything */
#define	splx(x)		spllower(x)	/* restore a level saved by splraise() */
206 1.1 fvdl
207 1.1 fvdl /*
208 1.1 fvdl * Software interrupt registration
209 1.1 fvdl *
210 1.1 fvdl * We hand-code this to ensure that it's atomic.
211 1.1 fvdl *
212 1.1 fvdl * XXX always scheduled on the current CPU.
213 1.1 fvdl */
214 1.1 fvdl static __inline void
215 1.1 fvdl softintr(int sir)
216 1.1 fvdl {
217 1.1 fvdl struct cpu_info *ci = curcpu();
218 1.1 fvdl
219 1.1 fvdl __asm __volatile("lock ; orl %1, %0" :
220 1.1 fvdl "=m"(ci->ci_ipending) : "ir" (1 << sir));
221 1.1 fvdl }
222 1.1 fvdl
223 1.1 fvdl /*
224 1.1 fvdl * XXX
225 1.1 fvdl */
226 1.1 fvdl #define setsoftnet() softintr(SIR_NET)
227 1.1 fvdl
228 1.1 fvdl /*
229 1.1 fvdl * Stub declarations.
230 1.1 fvdl */
231 1.1 fvdl
232 1.1 fvdl extern void Xsoftclock(void);
233 1.1 fvdl extern void Xsoftnet(void);
234 1.1 fvdl extern void Xsoftserial(void);
235 1.1 fvdl
236 1.1 fvdl extern struct intrstub i8259_stubs[];
237 1.2 fvdl extern struct intrstub ioapic_edge_stubs[];
238 1.2 fvdl extern struct intrstub ioapic_level_stubs[];
239 1.1 fvdl
240 1.1 fvdl struct cpu_info;
241 1.1 fvdl
242 1.1 fvdl extern char idt_allocmap[];
243 1.1 fvdl
244 1.1 fvdl void intr_default_setup(void);
245 1.1 fvdl int x86_nmi(void);
246 1.1 fvdl void intr_calculatemasks(struct cpu_info *);
247 1.1 fvdl int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *);
248 1.1 fvdl int intr_allocate_slot(struct pic *, int, int, int, struct cpu_info **, int *,
249 1.1 fvdl int *);
250 1.1 fvdl void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *);
251 1.1 fvdl void intr_disestablish(struct intrhand *);
252 1.1 fvdl void cpu_intr_init(struct cpu_info *);
253 1.1 fvdl int intr_find_mpmapping(int bus, int pin, int *handle);
254 1.1 fvdl #ifdef INTRDEBUG
255 1.1 fvdl void intr_printconfig(void);
256 1.1 fvdl #endif
257 1.1 fvdl
258 1.1 fvdl #ifdef MULTIPROCESSOR
259 1.1 fvdl int x86_send_ipi(struct cpu_info *, int);
260 1.1 fvdl void x86_broadcast_ipi(int);
261 1.1 fvdl void x86_multicast_ipi(int, int);
262 1.1 fvdl void x86_ipi_handler(void);
263 1.1 fvdl void x86_intlock(struct intrframe);
264 1.1 fvdl void x86_intunlock(struct intrframe);
265 1.1 fvdl void x86_softintlock(void);
266 1.1 fvdl void x86_softintunlock(void);
267 1.1 fvdl
268 1.1 fvdl extern void (*ipifunc[X86_NIPI])(struct cpu_info *);
269 1.1 fvdl #endif
270 1.1 fvdl
271 1.1 fvdl #endif /* !_LOCORE */
272 1.1 fvdl
273 1.1 fvdl /*
274 1.1 fvdl * Generic software interrupt support.
275 1.1 fvdl */
276 1.1 fvdl
277 1.1 fvdl #define X86_SOFTINTR_SOFTCLOCK 0
278 1.1 fvdl #define X86_SOFTINTR_SOFTNET 1
279 1.1 fvdl #define X86_SOFTINTR_SOFTSERIAL 2
280 1.1 fvdl #define X86_NSOFTINTR 3
281 1.1 fvdl
282 1.1 fvdl #ifndef _LOCORE
283 1.1 fvdl #include <sys/queue.h>
284 1.1 fvdl
/* A registered generic software interrupt handler. */
struct x86_soft_intrhand {
	TAILQ_ENTRY(x86_soft_intrhand)
		sih_q;				/* link on sih_intrhead's queue */
	struct x86_soft_intr *sih_intrhead;	/* owning soft interrupt level */
	void (*sih_fn)(void *);			/* handler function */
	void *sih_arg;				/* argument passed to sih_fn */
	int sih_pending;			/* nonzero while queued */
};
293 1.1 fvdl
/*
 * Per-level soft interrupt state: the queue of pending handlers and
 * the SIR value passed to softintr() when one is scheduled.
 */
struct x86_soft_intr {
	TAILQ_HEAD(, x86_soft_intrhand)
		softintr_q;			/* handlers pending dispatch */
	int softintr_ssir;			/* SIR bit for this level */
	struct simplelock softintr_slock;	/* protects softintr_q */
};
300 1.1 fvdl
/*
 * Lock/unlock a soft interrupt level's queue, blocking all interrupts
 * while held; the saved IPL goes in s.  The macro parameter "si" is
 * parenthesized in the expansion so argument expressions expand safely.
 */
#define	x86_softintr_lock(si, s)					\
do {									\
	(s) = splhigh();						\
	simple_lock(&(si)->softintr_slock);				\
} while (/*CONSTCOND*/ 0)

#define	x86_softintr_unlock(si, s)					\
do {									\
	simple_unlock(&(si)->softintr_slock);				\
	splx((s));							\
} while (/*CONSTCOND*/ 0)
312 1.1 fvdl
313 1.1 fvdl void *softintr_establish(int, void (*)(void *), void *);
314 1.1 fvdl void softintr_disestablish(void *);
315 1.1 fvdl void softintr_init(void);
316 1.1 fvdl void softintr_dispatch(int);
317 1.1 fvdl
/*
 * Schedule a soft interrupt handler for dispatch: if it is not
 * already pending, queue it on its level's queue and set that
 * level's SIR bit so it is dispatched later (see softintr_dispatch()).
 */
#define softintr_schedule(arg)						\
do {									\
	struct x86_soft_intrhand *__sih = (arg);			\
	struct x86_soft_intr *__si = __sih->sih_intrhead;		\
	int __s;							\
									\
	x86_softintr_lock(__si, __s);					\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	x86_softintr_unlock(__si, __s);					\
} while (/*CONSTCOND*/ 0)
332 1.1 fvdl #endif /* _LOCORE */
333 1.1 fvdl
334 1.1 fvdl #endif /* !_X86_INTR_H_ */
335