/*	$NetBSD: marvell_intr.h,v 1.1 2003/03/05 22:08:28 matt Exp $ */

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

/*
 * Interrupt Priority Levels
 */
#define IPL_NONE        0   /* nothing */
#define IPL_SOFTCLOCK   1   /* timeouts */
#define IPL_SOFTNET     2   /* protocol stacks */
#define IPL_BIO         3   /* block I/O */
#define IPL_NET         4   /* network */
#define IPL_NCP         5   /* network processors */
#define IPL_SOFTI2C     6   /* i2c */
#define IPL_SOFTSERIAL  7   /* serial */
#define IPL_TTY         8   /* terminal */
#define IPL_AUDIO       9   /* boom box */
#define IPL_EJECT       10  /* card eject */
#define IPL_GTERR       10  /* GT-64260 errors */
#define IPL_I2C         11  /* i2c */
#define IPL_VM          12  /* memory allocation */
#define IPL_SERIAL      13  /* serial */
#define IPL_CLOCK       14  /* clock */
#define IPL_SCHED       14  /* scheduler */
#define IPL_LOCK        14  /* same as high for now */
#define IPL_HIGH        15  /* everything */
#define NIPL            16
#define IPL_PRIMASK     0xf
#define IPL_EE          0x10    /* enable external interrupts on splx */

/* Interrupt sharing types. */
#define IST_NONE        0   /* none */
#define IST_PULSE       1   /* pulsed */
#define IST_EDGE        2   /* edge-triggered */
#define IST_LEVEL       3   /* level-triggered */
#define IST_SOFT        4   /* software-triggered */
#define IST_CLOCK       5   /* exclusive for clock */
#define NIST            6

#if !defined(_LOCORE) && defined(_KERNEL)

/*
 * We support 128 IRQs:
 *   96 (ICU_LEN) hard interrupt IRQs:
 *     - 64 Main Cause IRQs,
 *     - 32 GPP IRQs,
 *   and 32 softint IRQs.
 */
#define ICU_LEN         96      /* number of HW IRQs */
#define IRQ_GPP_BASE    64      /* base of GPP IRQs */
#define IRQ_GPP_SUM     (32+24) /* GPP[7..0] interrupt */  /* XXX */
#define NIRQ            128     /* total # of IRQs, HW + soft */

#define IMASK_ICU_LO    0
#define IMASK_ICU_HI    1
#define IMASK_ICU_GPP   2
#define IMASK_SOFTINT   3
#define IMASK_WORDSHIFT 5       /* log2(32) */
#define IMASK_BITMASK   (~((~0) << IMASK_WORDSHIFT))

#define IRQ_IS_GPP(irq) (((irq) >= IRQ_GPP_BASE) && ((irq) < ICU_LEN))
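
/*
 * Example: mapping an IRQ number onto the imask_t word/bit layout above.
 * A GPP IRQ such as 70 (IRQ_GPP_BASE + 6) lands in word 2 (IMASK_ICU_GPP):
 *
 *      irq >> IMASK_WORDSHIFT  == 70 / 32 == 2   (word index)
 *      irq &  IMASK_BITMASK    == 70 % 32 == 6   (bit within the word)
 *
 * The imask_orbit()/imask_clrbit() helpers below do exactly this indexing.
 */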

/*
 * interrupt mask bit vector
 */
typedef u_int32_t imask_t[4] __attribute__ ((aligned(16)));

static inline void imask_zero __P((imask_t *));
static inline void imask_zero_v __P((volatile imask_t *));
static inline void imask_dup_v __P((imask_t *, volatile imask_t *));
static inline void imask_and __P((imask_t *, imask_t *));
static inline void imask_andnot_v __P((volatile imask_t *, imask_t *));
static inline void imask_andnot_icu_vv __P((volatile imask_t *, volatile imask_t *));
static inline int imask_empty __P((imask_t *));
static inline void imask_orbit __P((imask_t *, int));
static inline void imask_orbit_v __P((volatile imask_t *, int));
static inline void imask_clrbit __P((imask_t *, int));
static inline void imask_clrbit_v __P((volatile imask_t *, int));
static inline u_int32_t imask_andbit_v __P((volatile imask_t *, int));
static inline int imask_test_v __P((volatile imask_t *, imask_t *));

static inline void
imask_zero(imask_t *idp)
{
        (*idp)[IMASK_ICU_LO] = 0;
        (*idp)[IMASK_ICU_HI] = 0;
        (*idp)[IMASK_ICU_GPP] = 0;
        (*idp)[IMASK_SOFTINT] = 0;
}

static inline void
imask_zero_v(volatile imask_t *idp)
{
        (*idp)[IMASK_ICU_LO] = 0;
        (*idp)[IMASK_ICU_HI] = 0;
        (*idp)[IMASK_ICU_GPP] = 0;
        (*idp)[IMASK_SOFTINT] = 0;
}

static inline void
imask_dup_v(imask_t *idp, volatile imask_t *isp)
{
        (*idp)[IMASK_ICU_LO] = (*isp)[IMASK_ICU_LO];
        (*idp)[IMASK_ICU_HI] = (*isp)[IMASK_ICU_HI];
        (*idp)[IMASK_ICU_GPP] = (*isp)[IMASK_ICU_GPP];
        (*idp)[IMASK_SOFTINT] = (*isp)[IMASK_SOFTINT];
}

static inline void
imask_and(imask_t *idp, imask_t *isp)
{
        (*idp)[IMASK_ICU_LO] &= (*isp)[IMASK_ICU_LO];
        (*idp)[IMASK_ICU_HI] &= (*isp)[IMASK_ICU_HI];
        (*idp)[IMASK_ICU_GPP] &= (*isp)[IMASK_ICU_GPP];
        (*idp)[IMASK_SOFTINT] &= (*isp)[IMASK_SOFTINT];
}

static inline void
imask_andnot_v(volatile imask_t *idp, imask_t *isp)
{
        (*idp)[IMASK_ICU_LO] &= ~(*isp)[IMASK_ICU_LO];
        (*idp)[IMASK_ICU_HI] &= ~(*isp)[IMASK_ICU_HI];
        (*idp)[IMASK_ICU_GPP] &= ~(*isp)[IMASK_ICU_GPP];
        (*idp)[IMASK_SOFTINT] &= ~(*isp)[IMASK_SOFTINT];
}

static inline void
imask_andnot_icu_vv(volatile imask_t *idp, volatile imask_t *isp)
{
        (*idp)[IMASK_ICU_LO] &= ~(*isp)[IMASK_ICU_LO];
        (*idp)[IMASK_ICU_HI] &= ~(*isp)[IMASK_ICU_HI];
        (*idp)[IMASK_ICU_GPP] &= ~(*isp)[IMASK_ICU_GPP];
}

static inline int
imask_empty(imask_t *isp)
{
        return (! ((*isp)[IMASK_ICU_LO] ||
                   (*isp)[IMASK_ICU_HI] ||
                   (*isp)[IMASK_ICU_GPP] ||
                   (*isp)[IMASK_SOFTINT]));
}

static inline void
imask_orbit(imask_t *idp, int bitno)
{
        (*idp)[bitno >> IMASK_WORDSHIFT] |= (1 << (bitno & IMASK_BITMASK));
}

static inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
        (*idp)[bitno >> IMASK_WORDSHIFT] |= (1 << (bitno & IMASK_BITMASK));
}

static inline void
imask_clrbit(imask_t *idp, int bitno)
{
        (*idp)[bitno >> IMASK_WORDSHIFT] &= ~(1 << (bitno & IMASK_BITMASK));
}

static inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
        (*idp)[bitno >> IMASK_WORDSHIFT] &= ~(1 << (bitno & IMASK_BITMASK));
}

static inline u_int32_t
imask_andbit_v(volatile imask_t *idp, int bitno)
{
        return (*idp)[bitno >> IMASK_WORDSHIFT] & (1 << (bitno & IMASK_BITMASK));
}

static inline int
imask_test_v(volatile imask_t *idp, imask_t *isp)
{
        return (((*idp)[IMASK_ICU_LO] & (*isp)[IMASK_ICU_LO]) ||
                ((*idp)[IMASK_ICU_HI] & (*isp)[IMASK_ICU_HI]) ||
                ((*idp)[IMASK_ICU_GPP] & (*isp)[IMASK_ICU_GPP]) ||
                ((*idp)[IMASK_SOFTINT] & (*isp)[IMASK_SOFTINT]));
}
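
/*
 * Illustrative sketch of how these helpers combine; this mirrors what the
 * spl code below does with ipending (pending IRQ bits) and imask[] (the
 * sources deliverable at a given IPL):
 *
 *      imask_orbit_v(&ipending, irq);          // record IRQ "irq" as pending
 *      if (imask_test_v(&ipending, &imask[IPL_NET]))
 *              intr_dispatch();                // something is now deliverable
 */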

#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
        u_int64_t tcause;
        u_int64_t tcommit;
        u_int64_t tstart;
        u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
        struct ext_intr_hist *histp;
        unsigned int histix;
        u_int64_t cnt;
        u_int64_t sum;
        u_int64_t min;
        u_int64_t max;
        u_int64_t pnd;
        u_int64_t borrowed;
        struct ext_intr_stat *save;
        unsigned long preempted[NIRQ];          /* XXX */
} ext_intr_stat_t __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init __P((void));
extern void ext_intr_stats_cause
        __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_pend
        __P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_commit __P((imask_t *));
extern void ext_intr_stats_commit_m __P((imask_t *));
extern void ext_intr_stats_commit_irq __P((u_int));
extern u_int64_t ext_intr_stats_pre __P((int));
extern void ext_intr_stats_post __P((int, u_int64_t));

#define EXT_INTR_STATS_INIT()            ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s) ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)       ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)     ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t)           u_int64_t t
#define EXT_INTR_STATS_PRE(i, t)         t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t)        ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)  ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)       ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
        intr_depth_max = (intr_depth > intr_depth_max) ? \
                intr_depth : intr_depth_max
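
/*
 * Illustrative sketch: the DECL/PRE/POST macros are intended to bracket a
 * handler invocation so the timing code compiles away when EXT_INTR_STATS
 * is not configured.  "irq", "handler" and "arg" here are hypothetical:
 *
 *      EXT_INTR_STATS_DECL(tstart);
 *
 *      EXT_INTR_STATS_PRE(irq, tstart);
 *      (*handler)(arg);
 *      EXT_INTR_STATS_POST(irq, tstart);
 */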

#else /* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(i, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif /* EXT_INTR_STATS */


#ifdef SPL_STATS
typedef struct spl_hist {
        int level;
        void *addr;
        u_int64_t time;
} spl_hist_t;

extern void spl_stats_init();
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()        spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)  spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif /* SPL_STATS */


void setsoftclock __P((void));
void clearsoftclock __P((void));
int splsoftclock __P((void));
void setsoftnet __P((void));
void clearsoftnet __P((void));
int splsoftnet __P((void));

void intr_dispatch __P((void));
#ifdef SPL_INLINE
static __inline int splraise __P((int));
static __inline int spllower __P((int));
static __inline void splx __P((int));
static __inline void softintr __P((int));
#else
extern int splraise __P((int));
extern int spllower __P((int));
extern void splx __P((int));
extern void softintr __P((int));
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static __inline void
extintr_restore(register_t omsr)
{
        __asm __volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static __inline register_t
extintr_enable(void)
{
        register_t omsr;

        __asm __volatile("sync;");
        __asm __volatile("mfmsr %0;" : "=r"(omsr));
        __asm __volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

        return omsr;
}

static __inline register_t
extintr_disable(void)
{
        register_t omsr;

        __asm __volatile("mfmsr %0;" : "=r"(omsr));
        __asm __volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
        __asm __volatile("isync;");

        return omsr;
}
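
/*
 * Illustrative sketch: these helpers bracket short critical sections that
 * must run with external interrupts disabled, restoring the caller's
 * original MSR[EE] state afterwards (softintr() below is a real instance):
 *
 *      register_t omsr = extintr_disable();
 *      // ... update ipending or other interrupt state ...
 *      extintr_restore(omsr);
 */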

#ifdef SPL_INLINE
static __inline int
splraise(int ncpl)
{
        int ocpl;
        register_t omsr;

        omsr = extintr_disable();
        ocpl = cpl;
        if (ncpl > cpl) {
                SPL_STATS_LOG(ncpl, 0);
                cpl = ncpl;
                if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
                        /* leave external interrupts disabled */
                        return (ocpl | IPL_EE);
                }
        }
        extintr_restore(omsr);
        return (ocpl);
}

static __inline void
splx(int xcpl)
{
        imask_t *ncplp;
        register_t omsr;
        int ncpl = xcpl & IPL_PRIMASK;

        ncplp = &imask[ncpl];

        omsr = extintr_disable();
        if (ncpl < cpl) {
                cpl = ncpl;
                SPL_STATS_LOG(ncpl, 0);
                if (imask_test_v(&ipending, ncplp))
                        intr_dispatch();
        }
        if (xcpl & IPL_EE)
                omsr |= PSL_EE;
        extintr_restore(omsr);
}

static __inline int
spllower(int ncpl)
{
        int ocpl;
        imask_t *ncplp;
        register_t omsr;

        ncpl &= IPL_PRIMASK;
        ncplp = &imask[ncpl];

        omsr = extintr_disable();
        ocpl = cpl;
        cpl = ncpl;
        SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
        ext_intr_statp = 0;
#endif
        if (imask_test_v(&ipending, ncplp))
                intr_dispatch();

        if (ncpl < IPL_HIGH)
                omsr |= PSL_EE;
        extintr_restore(omsr);

        return (ocpl);
}

static __inline void
softintr(int sibit)
{
        register_t omsr;

        omsr = extintr_disable();
        ipending[IMASK_SOFTINT] |= sibit;
        extintr_restore(omsr);
}
#endif /* SPL_INLINE */


/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define SIR_BASE        (NIRQ-32)
#define SIR_SOFTCLOCK   (NIRQ-5)
#define SIR_NET         (NIRQ-4)
#define SIR_I2C         (NIRQ-3)
#define SIR_SERIAL      (NIRQ-2)
#define SIR_HWCLOCK     (NIRQ-1)
#define SIR_RES         ~(SIBIT(SIR_SOFTCLOCK)|SIBIT(SIR_NET)| \
                          SIBIT(SIR_I2C)|SIBIT(SIR_SERIAL)|SIBIT(SIR_HWCLOCK))
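
/*
 * Example: with NIRQ == 128 the soft IRQ numbers and their SIBIT()
 * positions in ipending[IMASK_SOFTINT] work out as follows:
 *
 *      SIR_BASE      == 96
 *      SIR_SOFTCLOCK == 123    -> SIBIT() == (1 << 27)
 *      SIR_NET       == 124    -> SIBIT() == (1 << 28)
 *      SIR_I2C       == 125    -> SIBIT() == (1 << 29)
 *      SIR_SERIAL    == 126    -> SIBIT() == (1 << 30)
 *      SIR_HWCLOCK   == 127    -> SIBIT() == (1 << 31)
 */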

/*
 * standard hardware interrupt spl's
 */
#define splbio()        splraise(IPL_BIO)
#define splnet()        splraise(IPL_NET)
#define spltty()        splraise(IPL_TTY)
#define splaudio()      splraise(IPL_AUDIO)
#define splsched()      splraise(IPL_SCHED)
#define splclock()      splraise(IPL_CLOCK)
#define splstatclock()  splclock()
#define splserial()     splraise(IPL_SERIAL)

#define spllpt()        spltty()

/*
 * Software interrupt spl's
 *
 * NOTE: splsoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define spllowersoftclock()     spllower(IPL_SOFTCLOCK)
#define splsoftclock()          splraise(IPL_SOFTCLOCK)
#define splsoftnet()            splraise(IPL_SOFTNET)
#define splsoftserial()         splraise(IPL_SOFTSERIAL)

/*
 * Miscellaneous
 */
#define splvm()         splraise(IPL_VM)
#define spllock()       splraise(IPL_LOCK)
#define splhigh()       splraise(IPL_HIGH)
#define spl0()          spllower(IPL_NONE)
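
/*
 * Illustrative sketch of the usual raise/restore pattern these macros are
 * used in; the critical-section contents are hypothetical:
 *
 *      int s;
 *
 *      s = splnet();
 *      // ... touch state shared with the network interrupt handler ...
 *      splx(s);
 */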

#define SIBIT(ipl)      (1 << ((ipl) - SIR_BASE))
#define setsoftclock()  softintr(SIBIT(SIR_SOFTCLOCK))
#define setsoftnet()    softintr(SIBIT(SIR_NET))
#define setsoftserial() softintr(SIBIT(SIR_SERIAL))
#define setsofti2c()    softintr(SIBIT(SIR_I2C))

extern volatile int intrcnt[];
void *intr_establish(int, int, int, int (*)(void *), void *);
void intr_disestablish(void *);
void init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void ext_intr(struct intrframe *);
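
/*
 * Illustrative sketch of hooking an interrupt with the prototype above,
 * assuming the arguments are (irq, IST_* type, IPL_* level, handler, arg);
 * "mydev_intr" and "sc" are hypothetical:
 *
 *      void *ih;
 *
 *      ih = intr_establish(irq, IST_LEVEL, IPL_NET, mydev_intr, sc);
 *      ...
 *      intr_disestablish(ih);
 */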

void softserial(void);
void strayintr(int);

/*
 * defines for indexing intrcnt
 */
#define CNT_IRQ0        0
#define CNT_CLOCK       SIR_HWCLOCK
#define CNT_SOFTCLOCK   SIR_SOFTCLOCK
#define CNT_SOFTNET     SIR_NET
#define CNT_SOFTSERIAL  SIR_SERIAL
#define CNT_SOFTI2C     SIR_I2C

#endif /* !_LOCORE && _KERNEL */

#endif /* _MVPPPC_INTR_H_ */