/*	$NetBSD: intr.h,v 1.14 2004/10/23 21:24:05 yamt Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_INTR_H_
#define _X86_INTR_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

#include <machine/intrdefs.h>

#ifndef _LOCORE
#include <machine/cpu.h>
#include <machine/pic.h>

/*
 * Struct describing an interrupt source for a CPU. struct cpu_info
 * has an array of MAX_INTR_SOURCES of these. The index in the array
 * is equal to the stub number of the stub code as present in vector.s.
 *
 * The primary CPU's array of interrupt sources has its first 16
 * entries reserved for legacy ISA irq handlers, so for those entries
 * the array index maps 1:1 to the IRQ number. This does not hold for
 * interrupts that come in through IO APICs; to find their source, go
 * through ci->ci_isources[index]->is_pic.
 *
 * It would be possible to always maintain a 1:1 mapping, but that
 * would limit the total number of interrupt sources to
 * MAX_INTR_SOURCES (32) system-wide instead of 32 per CPU. It would
 * also mean that two IO APICs delivering interrupts from the same pin
 * number would collide if both were routed to the same CPU.
 */

struct intrstub {
	void *ist_entry;		/* stub entry point */
	void *ist_recurse;		/* re-entry point for spllower */
	void *ist_resume;		/* re-entry point for doreti */
};

struct intrsource {
	int is_maxlevel;		/* max. IPL for this source */
	int is_pin;			/* IRQ for legacy; pin for IO APIC */
	struct intrhand *is_handlers;	/* handler chain */
	struct pic *is_pic;		/* originating PIC */
	void *is_recurse;		/* entry for spllower */
	void *is_resume;		/* entry for doreti */
	struct evcnt is_evcnt;		/* interrupt counter */
	char is_evname[32];		/* event counter name */
	int is_flags;			/* see below */
	int is_type;			/* level, edge */
	int is_idtvec;			/* IDT vector number */
	int is_minlevel;		/* lowest IPL of any handler */
};

#define IS_LEGACY	0x0001		/* legacy ISA irq source */
#define IS_IPI		0x0002		/* inter-processor interrupt source */
#define IS_LOG		0x0004
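
/*
 * Illustrative sketch (not compiled; "example_slot_pic" is a
 * hypothetical helper): finding the PIC that owns a given interrupt
 * slot on a CPU, as described in the comment above.
 *
 *	struct pic *
 *	example_slot_pic(struct cpu_info *ci, int slot)
 *	{
 *		struct intrsource *is = ci->ci_isources[slot];
 *
 *		return (is != NULL) ? is->is_pic : NULL;
 *	}
 */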

/*
 * Interrupt handler chains. *_intr_establish() inserts a handler into
 * the list. The handler is called with its (single) argument.
 */

struct intrhand {
	int (*ih_fun)(void *);		/* handler function to call */
	void *ih_arg;			/* argument for ih_fun */
	int ih_level;			/* IPL of this handler */
	int (*ih_realfun)(void *);	/* actual handler behind a wrapper */
	void *ih_realarg;		/* actual argument behind a wrapper */
	struct intrhand *ih_next;	/* next handler on the chain */
	int ih_pin;			/* interrupt pin / IRQ */
	int ih_slot;			/* index into ci_isources[] */
	struct cpu_info *ih_cpu;	/* CPU the handler runs on */
};
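
/*
 * Illustrative sketch (assumed dispatch logic; the real dispatch
 * lives in the vector.s stubs): a chain is walked by calling each
 * handler with its single argument.
 *
 *	void
 *	example_run_chain(struct intrsource *is)
 *	{
 *		struct intrhand *ih;
 *
 *		for (ih = is->is_handlers; ih != NULL; ih = ih->ih_next)
 *			(*ih->ih_fun)(ih->ih_arg);
 *	}
 */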

#define IMASK(ci,level)		(ci)->ci_imask[(level)]
#define IUNMASK(ci,level)	(ci)->ci_iunmask[(level)]

extern void Xspllower(int);

static __inline int splraise(int);
static __inline void spllower(int);
static __inline void softintr(int);

/*
 * Convert spl level to local APIC level
 */
#define APIC_LEVEL(l)	((l) << 4)
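
/*
 * The local APIC's task priority register keeps the priority class in
 * its upper four bits, so (for example) APIC_LEVEL(0x8) yields 0x80,
 * i.e. priority class 8.
 */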

/*
 * Compiler barrier: prevent reordering of instructions.
 * XXX something similar will move to <sys/cdefs.h>
 * or thereabouts.
 * This prevents the compiler from reordering code around
 * this "instruction", acting as a sequence point for code generation.
 */
#define __splbarrier() __asm __volatile("":::"memory")

/*
 * Add a mask to cpl, and return the old value of cpl.
 */
static __inline int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__splbarrier();
	return (olevel);
}

/*
 * Restore a value to cpl (unmasking interrupts). If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
static __inline void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	__splbarrier();

	imask = IUNMASK(ci, nlevel);
	psl = read_psl();
	disable_intr();
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		write_psl(psl);
	}
}
177
178 /*
179 * Hardware interrupt masks
180 */
181 #define splbio() splraise(IPL_BIO)
182 #define splnet() splraise(IPL_NET)
183 #define spltty() splraise(IPL_TTY)
184 #define splaudio() splraise(IPL_AUDIO)
185 #define splclock() splraise(IPL_CLOCK)
186 #define splstatclock() splclock()
187 #define splserial() splraise(IPL_SERIAL)
188 #define splipi() splraise(IPL_IPI)
189

#define spllpt()	spltty()

#define SPL_ASSERT_BELOW(x)	KDASSERT(curcpu()->ci_ilevel < (x))

/*
 * Software interrupt masks
 *
 * NOTE: spllowersoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define spllowersoftclock()	spllower(IPL_SOFTCLOCK)

#define splsoftclock()		splraise(IPL_SOFTCLOCK)
#define splsoftnet()		splraise(IPL_SOFTNET)
#define splsoftserial()		splraise(IPL_SOFTSERIAL)

/*
 * Miscellaneous
 */
#define splvm()		splraise(IPL_VM)
#define splhigh()	splraise(IPL_HIGH)
#define spl0()		spllower(IPL_NONE)
#define splsched()	splraise(IPL_SCHED)
#define spllock()	splhigh()
#define splx(x)		spllower(x)
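
/*
 * Typical usage pattern (illustrative only): raise the IPL around a
 * critical section, then restore the saved level with splx().
 *
 *	int s;
 *
 *	s = splvm();
 *	... modify data shared with interrupt handlers ...
 *	splx(s);
 */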

/*
 * Software interrupt registration
 *
 * We hand-code this to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
static __inline void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	__asm __volatile("lock ; orl %1, %0" :
	    "=m"(ci->ci_ipending) : "ir" (1 << sir));
}

/*
 * XXX
 */
#define setsoftnet()	softintr(SIR_NET)

/*
 * Stub declarations.
 */

extern void Xsoftclock(void);
extern void Xsoftnet(void);
extern void Xsoftserial(void);

extern struct intrstub i8259_stubs[];
extern struct intrstub ioapic_edge_stubs[];
extern struct intrstub ioapic_level_stubs[];

struct cpu_info;

extern char idt_allocmap[];

struct pcibus_attach_args;

void intr_default_setup(void);
int x86_nmi(void);
void intr_calculatemasks(struct cpu_info *);
int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *);
int intr_allocate_slot(struct pic *, int, int, int, struct cpu_info **, int *,
    int *);
void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *);
void intr_disestablish(struct intrhand *);
void intr_add_pcibus(struct pcibus_attach_args *);
const char *intr_string(int);
void cpu_intr_init(struct cpu_info *);
int intr_find_mpmapping(int, int, int *);
#ifdef INTRDEBUG
void intr_printconfig(void);
#endif
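
/*
 * Illustrative sketch (argument meanings assumed to be legacy IRQ,
 * PIC, pin, trigger type, and IPL; "mydev_intr" and "sc" are
 * hypothetical): a driver hooks legacy ISA IRQ 5 as an edge-triggered
 * IPL_NET interrupt.
 *
 *	void *ih;
 *
 *	ih = intr_establish(5, &i8259_pic, 5, IST_EDGE, IPL_NET,
 *	    mydev_intr, sc);
 *	if (ih == NULL)
 *		printf("could not establish interrupt\n");
 */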

#ifdef MULTIPROCESSOR
int x86_send_ipi(struct cpu_info *, int);
void x86_broadcast_ipi(int);
void x86_multicast_ipi(int, int);
void x86_ipi_handler(void);
void x86_intlock(struct intrframe *);
void x86_intunlock(struct intrframe *);
void x86_softintlock(void);
void x86_softintunlock(void);

extern void (*ipifunc[X86_NIPI])(struct cpu_info *);
#endif
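
/*
 * Illustrative sketch (assuming an X86_IPI_* message bit from
 * <machine/intrdefs.h> and a nonzero return on failure): asking
 * another CPU to halt by sending it an IPI.
 *
 *	if (x86_send_ipi(ci, X86_IPI_HALT) != 0)
 *		printf("failed to send halt IPI\n");
 */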

#endif /* !_LOCORE */

/*
 * Generic software interrupt support.
 */

#define X86_SOFTINTR_SOFTCLOCK		0
#define X86_SOFTINTR_SOFTNET		1
#define X86_SOFTINTR_SOFTSERIAL	2
#define X86_NSOFTINTR			3

#ifndef _LOCORE
#include <sys/queue.h>

struct x86_soft_intrhand {
	TAILQ_ENTRY(x86_soft_intrhand) sih_q;	/* queue linkage */
	struct x86_soft_intr *sih_intrhead;	/* back pointer to soft intr */
	void (*sih_fn)(void *);		/* handler function */
	void *sih_arg;			/* handler argument */
	int sih_pending;		/* nonzero while queued */
};

struct x86_soft_intr {
	TAILQ_HEAD(, x86_soft_intrhand) softintr_q;	/* pending handlers */
	int softintr_ssir;		/* soft interrupt number */
	struct simplelock softintr_slock;	/* protects softintr_q */
};

#define x86_softintr_lock(si, s)					\
do {									\
	(s) = splhigh();						\
	simple_lock(&(si)->softintr_slock);				\
} while (/*CONSTCOND*/ 0)

#define x86_softintr_unlock(si, s)					\
do {									\
	simple_unlock(&(si)->softintr_slock);				\
	splx((s));							\
} while (/*CONSTCOND*/ 0)

void *softintr_establish(int, void (*)(void *), void *);
void softintr_disestablish(void *);
void softintr_init(void);
void softintr_dispatch(int);

#define softintr_schedule(arg)						\
do {									\
	struct x86_soft_intrhand *__sih = (arg);			\
	struct x86_soft_intr *__si = __sih->sih_intrhead;		\
	int __s;							\
									\
	x86_softintr_lock(__si, __s);					\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	x86_softintr_unlock(__si, __s);					\
} while (/*CONSTCOND*/ 0)
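
/*
 * Illustrative usage sketch ("mydev_softrx" and "sc" are
 * hypothetical): a driver registers a soft interrupt handler at
 * attach time and schedules it from its hardware interrupt handler.
 *
 *	void *sih;
 *
 *	sih = softintr_establish(IPL_SOFTNET, mydev_softrx, sc);
 *
 *	(from the hardware interrupt handler:)
 *	softintr_schedule(sih);
 *
 *	(at detach time:)
 *	softintr_disestablish(sih);
 */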
#endif /* !_LOCORE */

#endif /* !_X86_INTR_H_ */