/*	$NetBSD: intr.h,v 1.23 2006/12/26 15:22:44 ad Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _X86_INTR_H_
#define _X86_INTR_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

#include <machine/intrdefs.h>

#ifndef _LOCORE
#include <machine/cpu.h>
#include <machine/pic.h>

/*
 * Struct describing an interrupt source for a CPU.  struct cpu_info
 * has an array of MAX_INTR_SOURCES of these.  The index in the array
 * is equal to the stub number of the stub code in vector.s.
 *
 * The primary CPU's array of interrupt sources has its first 16
 * entries reserved for legacy ISA irq handlers, so those entries have
 * a 1:1 array index to IRQ number mapping.  This is not true for
 * interrupts that come in through IO APICs; to find their source, go
 * through ci->ci_isources[index].is_pic.
 *
 * It's possible to always maintain a 1:1 mapping, but that means
 * limiting the total number of interrupt sources to MAX_INTR_SOURCES
 * (32) overall, instead of 32 per CPU.  It would also mean that
 * multiple IO APICs delivering interrupts from an equal pin number
 * would overlap if they were sent to the same CPU.
 */

struct intrstub {
	void *ist_entry;		/* vector entry point */
	void *ist_recurse;		/* entry for spllower */
	void *ist_resume;		/* entry for doreti */
};

struct intrsource {
	int is_maxlevel;		/* max. IPL for this source */
	int is_pin;			/* IRQ for legacy; pin for IO APIC */
	struct intrhand *is_handlers;	/* handler chain */
	struct pic *is_pic;		/* originating PIC */
	void *is_recurse;		/* entry for spllower */
	void *is_resume;		/* entry for doreti */
	struct evcnt is_evcnt;		/* interrupt counter */
	char is_evname[32];		/* event counter name */
	int is_flags;			/* see below */
	int is_type;			/* level, edge */
	int is_idtvec;			/* IDT vector number */
	int is_minlevel;		/* min. IPL for this source */
};
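
/*
 * Illustrative sketch (informative only): recovering the originating
 * PIC and pin for a non-legacy interrupt source, as described in the
 * comment above.  This assumes ci_isources[] holds pointers to struct
 * intrsource, per its declaration in struct cpu_info (machine/cpu.h):
 *
 *	struct intrsource *is = ci->ci_isources[index];
 *	struct pic *pic = is->is_pic;
 *	int pin = is->is_pin;
 */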

#define IS_LEGACY	0x0001		/* legacy ISA irq source */
#define IS_IPI		0x0002
#define IS_LOG		0x0004

/*
 * Interrupt handler chains.  *_intr_establish() inserts a handler into
 * the list.  The handler is called with its (single) argument.
 */

struct intrhand {
	int (*ih_fun)(void *);		/* handler to call */
	void *ih_arg;			/* argument passed to ih_fun */
	int ih_level;			/* IPL of this handler */
	int (*ih_realfun)(void *);	/* real handler, if ih_fun wraps it */
	void *ih_realarg;		/* real argument, likewise */
	struct intrhand *ih_next;	/* next handler in the chain */
	int ih_pin;			/* pin on the originating PIC */
	int ih_slot;			/* index into ci_isources */
	struct cpu_info *ih_cpu;	/* CPU this handler runs on */
};

#define IMASK(ci,level) (ci)->ci_imask[(level)]
#define IUNMASK(ci,level) (ci)->ci_iunmask[(level)]

extern void Xspllower(int);

static __inline int splraise(int);
static __inline void spllower(int);
static __inline void softintr(int);

/*
 * Convert spl level to local APIC level
 */
#define APIC_LEVEL(l)	((l) << 4)
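
/*
 * Worked example (informative only): APIC_LEVEL(0x8) == 0x80, i.e. the
 * spl level lands in bits 7:4, the local APIC's task-priority class.
 */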

/*
 * Raise cpl to nlevel (if it is higher than the current level), and
 * return the old value of cpl.
 */
static __inline int
splraise(int nlevel)
{
	int olevel;
	struct cpu_info *ci = curcpu();

	olevel = ci->ci_ilevel;
	if (nlevel > olevel)
		ci->ci_ilevel = nlevel;
	__insn_barrier();
	return (olevel);
}

/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
static __inline void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	u_int32_t imask;
	u_long psl;

	__insn_barrier();

	imask = IUNMASK(ci, nlevel);
	psl = read_psl();
	disable_intr();
	if (ci->ci_ipending & imask) {
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		write_psl(psl);
	}
}

#define SPL_ASSERT_BELOW(x) KDASSERT(curcpu()->ci_ilevel < (x))

/*
 * Software interrupt masks
 *
 * NOTE: spllowersoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define spllowersoftclock()	spllower(IPL_SOFTCLOCK)

/*
 * Miscellaneous
 */
#define spl0()		spllower(IPL_NONE)
#define splx(x)		spllower(x)
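
/*
 * Typical usage sketch (informative only; IPL_HIGH is just an example
 * level from machine/intrdefs.h):
 *
 *	int s = splraise(IPL_HIGH);
 *	... access state shared with interrupt handlers ...
 *	splx(s);
 */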

typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static inline int
splraiseipl(ipl_cookie_t icookie)
{

	return splraise(icookie._ipl);
}
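
/*
 * Sketch of the intended pattern (informative only): a subsystem
 * converts its IPL to a cookie once, then raises with the cookie on
 * hot paths; see also spl(9).
 *
 *	ipl_cookie_t ic = makeiplcookie(IPL_HIGH);
 *	...
 *	int s = splraiseipl(ic);
 *	...
 *	splx(s);
 */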

#include <sys/spl.h>

/*
 * Software interrupt registration
 *
 * We hand-code this to ensure that it's atomic.
 *
 * XXX always scheduled on the current CPU.
 */
static __inline void
softintr(int sir)
{
	struct cpu_info *ci = curcpu();

	/* Atomically set this soft interrupt's bit in ci_ipending. */
	__asm volatile("lock ; orl %1, %0" :
	    "+m" (ci->ci_ipending) : "ir" (1 << sir));
}

/*
 * XXX
 */
#define setsoftnet()	softintr(SIR_NET)

/*
 * Stub declarations.
 */

extern void Xsoftclock(void);
extern void Xsoftnet(void);
extern void Xsoftserial(void);

extern struct intrstub i8259_stubs[];
extern struct intrstub ioapic_edge_stubs[];
extern struct intrstub ioapic_level_stubs[];

struct cpu_info;

extern char idt_allocmap[];

struct pcibus_attach_args;

void intr_default_setup(void);
int x86_nmi(void);
void intr_calculatemasks(struct cpu_info *);
int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *);
int intr_allocate_slot(struct pic *, int, int, int, struct cpu_info **, int *,
    int *);
void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *);
void intr_disestablish(struct intrhand *);
void intr_add_pcibus(struct pcibus_attach_args *);
const char *intr_string(int);
void cpu_intr_init(struct cpu_info *);
int intr_find_mpmapping(int, int, int *);
struct pic *intr_findpic(int);
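
/*
 * Hypothetical establish/disestablish sketch (informative only;
 * mydrv_intr and sc are placeholder names; the argument meanings,
 * legacy irq, PIC, pin, type, level, handler and argument, are
 * inferred from the prototype above):
 *
 *	void *ih = intr_establish(-1, pic, pin, IST_LEVEL, IPL_NET,
 *	    mydrv_intr, sc);
 *	...
 *	intr_disestablish(ih);
 */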
#ifdef INTRDEBUG
void intr_printconfig(void);
#endif

#ifdef MULTIPROCESSOR
int x86_send_ipi(struct cpu_info *, int);
void x86_broadcast_ipi(int);
void x86_multicast_ipi(int, int);
void x86_ipi_handler(void);
void x86_intlock(struct intrframe *);
void x86_intunlock(struct intrframe *);
void x86_softintlock(void);
void x86_softintunlock(void);

extern void (*ipifunc[X86_NIPI])(struct cpu_info *);
#endif

#endif /* !_LOCORE */

/*
 * Generic software interrupt support.
 */

#define X86_SOFTINTR_SOFTCLOCK		0
#define X86_SOFTINTR_SOFTNET		1
#define X86_SOFTINTR_SOFTSERIAL	2
#define X86_NSOFTINTR			3

#ifndef _LOCORE
#include <sys/queue.h>

struct x86_soft_intrhand {
	TAILQ_ENTRY(x86_soft_intrhand)
		sih_q;				/* queue linkage */
	struct x86_soft_intr *sih_intrhead;	/* back pointer to the queue head */
	void (*sih_fn)(void *);			/* handler function */
	void *sih_arg;				/* argument passed to sih_fn */
	int sih_pending;			/* nonzero if already queued */
};

struct x86_soft_intr {
	TAILQ_HEAD(, x86_soft_intrhand)
		softintr_q;			/* queue of pending handlers */
	int softintr_ssir;			/* soft interrupt number to raise */
	struct simplelock softintr_slock;	/* protects softintr_q */
};

#define x86_softintr_lock(si, s)					\
do {									\
	(s) = splhigh();						\
	simple_lock(&si->softintr_slock);				\
} while (/*CONSTCOND*/ 0)

#define x86_softintr_unlock(si, s)					\
do {									\
	simple_unlock(&si->softintr_slock);				\
	splx((s));							\
} while (/*CONSTCOND*/ 0)

void *softintr_establish(int, void (*)(void *), void *);
void softintr_disestablish(void *);
void softintr_init(void);
void softintr_dispatch(int);

#define softintr_schedule(arg)						\
do {									\
	struct x86_soft_intrhand *__sih = (arg);			\
	struct x86_soft_intr *__si = __sih->sih_intrhead;		\
	int __s;							\
									\
	x86_softintr_lock(__si, __s);					\
	if (__sih->sih_pending == 0) {					\
		TAILQ_INSERT_TAIL(&__si->softintr_q, __sih, sih_q);	\
		__sih->sih_pending = 1;					\
		softintr(__si->softintr_ssir);				\
	}								\
	x86_softintr_unlock(__si, __s);					\
} while (/*CONSTCOND*/ 0)
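
/*
 * Illustrative driver usage (informative only; mydrv_soft and sc are
 * placeholder names):
 *
 *	void *sih = softintr_establish(IPL_SOFTNET, mydrv_soft, sc);
 *	...
 *	softintr_schedule(sih);
 *	...
 *	softintr_disestablish(sih);
 */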
#endif /* !_LOCORE */

#endif /* !_X86_INTR_H_ */