/*	$NetBSD: marvell_intr.h,v 1.4 2003/03/17 16:54:16 matt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

/*
 * Interrupt Priority Levels
 */
#define IPL_NONE        0       /* nothing */
#define IPL_SOFTCLOCK   1       /* timeouts */
#define IPL_SOFTNET     2       /* protocol stacks */
#define IPL_BIO         3       /* block I/O */
#define IPL_NET         4       /* network */
#define IPL_NCP         5       /* network processors */
#define IPL_SOFTI2C     6       /* i2c */
#define IPL_SOFTSERIAL  7       /* serial */
#define IPL_TTY         8       /* terminal */
#define IPL_AUDIO       9       /* boom box */
#define IPL_EJECT       10      /* card eject */
#define IPL_GTERR       10      /* GT-64260 errors */
#define IPL_I2C         11      /* i2c */
#define IPL_VM          12      /* memory allocation */
#define IPL_SERIAL      13      /* serial */
#define IPL_CLOCK       14      /* clock */
#define IPL_SCHED       14      /* scheduler */
#define IPL_LOCK        14      /* same as high for now */
#define IPL_HIGH        15      /* everything */
#define NIPL            16
#define IPL_PRIMASK     0xf
#define IPL_EE          0x10    /* enable external interrupts on splx */

/* Interrupt sharing types. */
#define IST_NONE        0       /* none */
#define IST_PULSE       1       /* pulsed */
#define IST_EDGE        2       /* edge-triggered */
#define IST_LEVEL       3       /* level-triggered */
#define IST_SOFT        4       /* software-triggered */
#define IST_CLOCK       5       /* exclusive for clock */
#define NIST            6

#if !defined(_LOCORE) && defined(_KERNEL)

/*
 * we support 128 IRQs:
 * 96 (ICU_LEN) hard interrupt IRQs:
 *   - 64 Main Cause IRQs,
 *   - 32 GPP IRQs,
 * and 32 softint IRQs
 */
#define ICU_LEN         96      /* number of HW IRQs */
#define IRQ_GPP_BASE    64      /* base of GPP IRQs */
#define IRQ_GPP_SUM     (32+24) /* GPP[7..0] interrupt */ /* XXX */
#define NIRQ            128     /* total # of IRQs, HW and soft */

#define IMASK_ICU_LO    0
#define IMASK_ICU_HI    1
#define IMASK_ICU_GPP   2
#define IMASK_SOFTINT   3
#define IMASK_WORDSHIFT 5       /* log2(32) */
#define IMASK_BITMASK   ~((~0) << IMASK_WORDSHIFT)

#define IRQ_IS_GPP(irq) ((irq >= IRQ_GPP_BASE) && (irq < ICU_LEN))
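
/*
 * Worked example (illustrative only): IRQ 70 is a GPP interrupt
 * (IRQ_GPP_BASE <= 70 < ICU_LEN).  Within an imask_t it lands in word
 * 70 >> IMASK_WORDSHIFT == 2 (IMASK_ICU_GPP), at bit
 * 70 & IMASK_BITMASK == 6, so its mask bit is (1 << 6).
 */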

/*
 * interrupt mask bit vector
 */
typedef u_int32_t imask_t[4] __attribute__ ((aligned(16)));

static inline void imask_zero __P((imask_t *));
static inline void imask_zero_v __P((volatile imask_t *));
static inline void imask_dup_v __P((imask_t *, volatile imask_t *));
static inline void imask_and __P((imask_t *, imask_t *));
static inline void imask_andnot_v __P((volatile imask_t *, imask_t *));
static inline void imask_andnot_icu_vv __P((volatile imask_t *, volatile imask_t *));
static inline int imask_empty __P((imask_t *));
static inline void imask_orbit __P((imask_t *, int));
static inline void imask_orbit_v __P((volatile imask_t *, int));
static inline void imask_clrbit __P((imask_t *, int));
static inline void imask_clrbit_v __P((volatile imask_t *, int));
static inline u_int32_t imask_andbit_v __P((volatile imask_t *, int));
static inline int imask_test_v __P((volatile imask_t *, imask_t *));

static inline void
imask_zero(imask_t *idp)
{
	(*idp)[IMASK_ICU_LO] = 0;
	(*idp)[IMASK_ICU_HI] = 0;
	(*idp)[IMASK_ICU_GPP] = 0;
	(*idp)[IMASK_SOFTINT] = 0;
}

static inline void
imask_zero_v(volatile imask_t *idp)
{
	(*idp)[IMASK_ICU_LO] = 0;
	(*idp)[IMASK_ICU_HI] = 0;
	(*idp)[IMASK_ICU_GPP] = 0;
	(*idp)[IMASK_SOFTINT] = 0;
}

static inline void
imask_dup_v(imask_t *idp, volatile imask_t *isp)
{
	(*idp)[IMASK_ICU_LO] = (*isp)[IMASK_ICU_LO];
	(*idp)[IMASK_ICU_HI] = (*isp)[IMASK_ICU_HI];
	(*idp)[IMASK_ICU_GPP] = (*isp)[IMASK_ICU_GPP];
	(*idp)[IMASK_SOFTINT] = (*isp)[IMASK_SOFTINT];
}

static inline void
imask_and(imask_t *idp, imask_t *isp)
{
	(*idp)[IMASK_ICU_LO] &= (*isp)[IMASK_ICU_LO];
	(*idp)[IMASK_ICU_HI] &= (*isp)[IMASK_ICU_HI];
	(*idp)[IMASK_ICU_GPP] &= (*isp)[IMASK_ICU_GPP];
	(*idp)[IMASK_SOFTINT] &= (*isp)[IMASK_SOFTINT];
}

static inline void
imask_andnot_v(volatile imask_t *idp, imask_t *isp)
{
	(*idp)[IMASK_ICU_LO] &= ~(*isp)[IMASK_ICU_LO];
	(*idp)[IMASK_ICU_HI] &= ~(*isp)[IMASK_ICU_HI];
	(*idp)[IMASK_ICU_GPP] &= ~(*isp)[IMASK_ICU_GPP];
	(*idp)[IMASK_SOFTINT] &= ~(*isp)[IMASK_SOFTINT];
}

static inline void
imask_andnot_icu_vv(volatile imask_t *idp, volatile imask_t *isp)
{
	(*idp)[IMASK_ICU_LO] &= ~(*isp)[IMASK_ICU_LO];
	(*idp)[IMASK_ICU_HI] &= ~(*isp)[IMASK_ICU_HI];
	(*idp)[IMASK_ICU_GPP] &= ~(*isp)[IMASK_ICU_GPP];
}

static inline int
imask_empty(imask_t *isp)
{
	return (! ((*isp)[IMASK_ICU_LO] ||
		   (*isp)[IMASK_ICU_HI] ||
		   (*isp)[IMASK_ICU_GPP] ||
		   (*isp)[IMASK_SOFTINT]));
}

static inline void
imask_orbit(imask_t *idp, int bitno)
{
	(*idp)[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
	(*idp)[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit(imask_t *idp, int bitno)
{
	(*idp)[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
	(*idp)[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline u_int32_t
imask_andbit_v(volatile imask_t *idp, int bitno)
{
	return (*idp)[bitno>>IMASK_WORDSHIFT] & (1 << (bitno&IMASK_BITMASK));
}

static inline int
imask_test_v(volatile imask_t *idp, imask_t *isp)
{
	return (((*idp)[IMASK_ICU_LO] & (*isp)[IMASK_ICU_LO]) ||
		((*idp)[IMASK_ICU_HI] & (*isp)[IMASK_ICU_HI]) ||
		((*idp)[IMASK_ICU_GPP] & (*isp)[IMASK_ICU_GPP]) ||
		((*idp)[IMASK_SOFTINT] & (*isp)[IMASK_SOFTINT]));
}
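
#if 0
/*
 * Usage sketch (not compiled; illustrative only): mark IRQ 70 pending,
 * then dispatch if anything pending is deliverable at the current
 * priority level (cpl is the level variable used by the spl inlines
 * below).
 */
	imask_orbit_v(&ipending, 70);
	if (imask_test_v(&ipending, &imask[cpl]))
		intr_dispatch();
#endif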

#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
	u_int64_t tcause;
	u_int64_t tcommit;
	u_int64_t tstart;
	u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
	struct ext_intr_hist *histp;
	unsigned int histix;
	u_int64_t cnt;
	u_int64_t sum;
	u_int64_t min;
	u_int64_t max;
	u_int64_t pnd;
	u_int64_t borrowed;
	struct ext_intr_stat *save;
	unsigned long preempted[NIRQ];	/* XXX */
} ext_intr_stat_t __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init __P((void));
extern void ext_intr_stats_cause
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_pend
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_commit __P((imask_t *));
extern void ext_intr_stats_commit_m __P((imask_t *));
extern void ext_intr_stats_commit_irq __P((u_int));
extern u_int64_t ext_intr_stats_pre __P((int));
extern void ext_intr_stats_post __P((int, u_int64_t));

#define EXT_INTR_STATS_INIT()            ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s) ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)       ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)     ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t)           u_int64_t t
#define EXT_INTR_STATS_PRE(i, t)         t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t)        ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)  ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)       ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
	intr_depth_max = (intr_depth > intr_depth_max) ? \
	    intr_depth : intr_depth_max

#else /* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(irq, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif /* EXT_INTR_STATS */


#ifdef SPL_STATS
typedef struct spl_hist {
	int level;
	void *addr;
	u_int64_t time;
} spl_hist_t;

extern void spl_stats_init();
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()        spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)  spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif /* SPL_STATS */


void setsoftclock __P((void));
void clearsoftclock __P((void));
int splsoftclock __P((void));
void setsoftnet __P((void));
void clearsoftnet __P((void));
int splsoftnet __P((void));

void intr_dispatch __P((void));
#ifdef SPL_INLINE
static __inline int splraise __P((int));
static __inline int spllower __P((int));
static __inline void splx __P((int));
#else
extern int splraise __P((int));
extern int spllower __P((int));
extern void splx __P((int));
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static __inline void
extintr_restore(register_t omsr)
{
	__asm __volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static __inline register_t
extintr_enable(void)
{
	register_t omsr;

	__asm __volatile("sync;");
	__asm __volatile("mfmsr %0;" : "=r"(omsr));
	__asm __volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

	return omsr;
}

static __inline register_t
extintr_disable(void)
{
	register_t omsr;

	__asm __volatile("mfmsr %0;" : "=r"(omsr));
	__asm __volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
	__asm __volatile("isync;");

	return omsr;
}
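
#if 0
/*
 * Usage sketch (not compiled; illustrative only): a short critical
 * section using the PSL_EE helpers above.  extintr_disable() returns
 * the old MSR value, which must be handed back to extintr_restore()
 * unmodified.
 */
	register_t omsr = extintr_disable();
	/* ... touch state shared with interrupt handlers ... */
	extintr_restore(omsr);
#endif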

#ifdef SPL_INLINE
static __inline int
splraise(int ncpl)
{
	int ocpl;
	register_t omsr;

	omsr = extintr_disable();
	ocpl = cpl;
	if (ncpl > cpl) {
		SPL_STATS_LOG(ncpl, 0);
		cpl = ncpl;
		if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
			/* leave external interrupts disabled */
			return (ocpl | IPL_EE);
		}
	}
	extintr_restore(omsr);
	return (ocpl);
}

static __inline void
splx(int xcpl)
{
	imask_t *ncplp;
	register_t omsr;
	int ncpl = xcpl & IPL_PRIMASK;

	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	if (ncpl < cpl) {
		cpl = ncpl;
		SPL_STATS_LOG(ncpl, 0);
		if (imask_test_v(&ipending, ncplp))
			intr_dispatch();
	}
	if (xcpl & IPL_EE)
		omsr |= PSL_EE;
	extintr_restore(omsr);
}

static __inline int
spllower(int ncpl)
{
	int ocpl;
	imask_t *ncplp;
	register_t omsr;

	ncpl &= IPL_PRIMASK;
	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	ocpl = cpl;
	cpl = ncpl;
	SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
	ext_intr_statp = 0;
#endif
	if (imask_test_v(&ipending, ncplp))
		intr_dispatch();

	if (ncpl < IPL_HIGH)
		omsr |= PSL_EE;
	extintr_restore(omsr);

	return (ocpl);
}
#endif /* SPL_INLINE */
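
#if 0
/*
 * Usage sketch (not compiled; illustrative only): the canonical spl
 * protection pattern.  splraise() returns the previous level, possibly
 * with IPL_EE or'ed in; splx() must be given that value unmodified so
 * it can re-enable external interrupts when appropriate.
 */
	int s = splnet();	/* raise to IPL_NET */
	/* ... modify data shared with network interrupt handlers ... */
	splx(s);		/* drop back; dispatches pending interrupts */
#endif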


/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define SIR_BASE        (NIRQ-32)
#define SIR_SOFTCLOCK   (NIRQ-5)
#define SIR_SOFTNET     (NIRQ-4)
#define SIR_SOFTI2C     (NIRQ-3)
#define SIR_SOFTSERIAL  (NIRQ-2)
#define SIR_HWCLOCK     (NIRQ-1)
#define SIR_RES         ~(SIBIT(SIR_SOFTCLOCK)|\
                          SIBIT(SIR_SOFTNET)|\
                          SIBIT(SIR_SOFTI2C)|\
                          SIBIT(SIR_SOFTSERIAL)|\
                          SIBIT(SIR_HWCLOCK))
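
/*
 * Worked example (illustrative only): with NIRQ == 128, SIR_BASE == 96,
 * so SIR_SOFTCLOCK == 123 maps to SIBIT(123) == (1 << 27) in the
 * softint word, and SIR_HWCLOCK == 127 to (1 << 31).  SIR_RES is the
 * mask of the remaining, unassigned softint bits.
 */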

/*
 * standard hardware interrupt spl's
 */
#define splbio()        splraise(IPL_BIO)
#define splnet()        splraise(IPL_NET)
#define spltty()        splraise(IPL_TTY)
#define splaudio()      splraise(IPL_AUDIO)
#define splsched()      splraise(IPL_SCHED)
#define splclock()      splraise(IPL_CLOCK)
#define splstatclock()  splclock()
#define splserial()     splraise(IPL_SERIAL)

#define spllpt()        spltty()

/*
 * Software interrupt spl's
 *
 * NOTE: spllowersoftclock() is used by hardclock() to lower the priority
 * from clock to softclock before it calls softclock().
 */
#define spllowersoftclock()     spllower(IPL_SOFTCLOCK)
#define splsoftclock()          splraise(IPL_SOFTCLOCK)
#define splsoftnet()            splraise(IPL_SOFTNET)
#define splsoftserial()         splraise(IPL_SOFTSERIAL)

#define __HAVE_GENERIC_SOFT_INTERRUPTS  /* should be in <machine/types.h> */
void *softintr_establish(int level, void (*fun)(void *), void *arg);
void softintr_disestablish(void *cookie);
void softintr_schedule(void *cookie);
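
#if 0
/*
 * Usage sketch (not compiled; illustrative only): establish a soft
 * interrupt at IPL_SOFTNET and schedule it, typically from a hard
 * interrupt handler.  mysoft_handler and sc are placeholder names,
 * not part of this interface.
 */
	void *si = softintr_establish(IPL_SOFTNET, mysoft_handler, sc);
	/* ... later, e.g. from the hard interrupt handler: */
	softintr_schedule(si);
#endif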


/*
 * Miscellaneous
 */
#define splvm()         splraise(IPL_VM)
#define spllock()       splraise(IPL_LOCK)
#define splhigh()       splraise(IPL_HIGH)
#define spl0()          spllower(IPL_NONE)

#define SIBIT(ipl)      (1 << ((ipl) - SIR_BASE))
#if 0
#define setsoftclock()  softintr(SIBIT(SIR_SOFTCLOCK))
#define setsoftnet()    softintr(SIBIT(SIR_SOFTNET))
#define setsoftserial() softintr(SIBIT(SIR_SOFTSERIAL))
#define setsofti2c()    softintr(SIBIT(SIR_SOFTI2C))
#endif

extern void *softnet_si;
void *intr_establish(int, int, int, int (*)(void *), void *);
void intr_disestablish(void *);
void init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void ext_intr(struct intrframe *);

#if 0
void softserial(void);
#endif
void strayintr(int);

#define schednetisr(isr) do {                                   \
	__asm __volatile(                                       \
	"1:     lwarx   0,0,%1\n"                               \
	"       or      0,0,%0\n"                               \
	"       stwcx.  0,0,%1\n"                               \
	"       bne-    1b"                                     \
	:                                                       \
	: "r"(1 << (isr)), "b"(&netisr)                         \
	: "cr0", "r0");                                         \
	softintr_schedule(softnet_si);                          \
} while (/*CONSTCOND*/ 0)
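
/*
 * The lwarx/stwcx. pair in schednetisr() is a load-reserve/
 * store-conditional loop: it atomically ORs the isr bit into the
 * global netisr word, retrying if the reservation is lost to a
 * competing update, and then schedules the softnet soft interrupt.
 */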

/*
 * defines for indexing intrcnt
 */
#define CNT_IRQ0        0
#define CNT_CLOCK       SIR_HWCLOCK
#define CNT_SOFTCLOCK   SIR_SOFTCLOCK
#define CNT_SOFTNET     SIR_SOFTNET
#define CNT_SOFTSERIAL  SIR_SOFTSERIAL
#define CNT_SOFTI2C     SIR_SOFTI2C

#endif /* !_LOCORE */

#endif /* _MVPPPC_INTR_H_ */