/*	$NetBSD: marvell_intr.h,v 1.11 2006/12/21 15:55:24 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

/*
 * Interrupt Priority Levels
 */
#define IPL_NONE	0	/* nothing */
#define IPL_SOFTCLOCK	1	/* timeouts */
#define IPL_SOFTNET	2	/* protocol stacks */
#define IPL_BIO		3	/* block I/O */
#define IPL_NET		4	/* network */
#define IPL_NCP		5	/* network processors */
#define IPL_SOFTI2C	6	/* i2c */
#define IPL_SOFTSERIAL	7	/* serial */
#define IPL_TTY		8	/* terminal */
#define IPL_LPT		IPL_TTY
#define IPL_AUDIO	9	/* boom box */
#define IPL_EJECT	10	/* card eject */
#define IPL_GTERR	10	/* GT-64260 errors */
#define IPL_I2C		11	/* i2c */
#define IPL_VM		12	/* memory allocation */
#define IPL_SERIAL	13	/* serial */
#define IPL_CLOCK	14	/* clock */
#define IPL_STATCLOCK	IPL_CLOCK
#define IPL_SCHED	14	/* scheduler */
#define IPL_LOCK	14	/* same as high for now */
#define IPL_HIGH	15	/* everything */
#define NIPL		16
#define IPL_PRIMASK	0xf
#define IPL_EE		0x10	/* enable external interrupts on splx */

/* Interrupt sharing types. */
#define IST_NONE	0	/* none */
#define IST_PULSE	1	/* pulsed */
#define IST_EDGE	2	/* edge-triggered */
#define IST_LEVEL	3	/* level-triggered */
#define IST_SOFT	4	/* software-triggered */
#define IST_CLOCK	5	/* exclusive for clock */
#define NIST		6

#if !defined(_LOCORE) && defined(_KERNEL)

#define CLKF_BASEPRI(frame)	((frame)->pri == IPL_NONE)

/*
 * we support 128 IRQs:
 *	96 (ICU_LEN) hard interrupt IRQs:
 *	- 64 Main Cause IRQs,
 *	- 32 GPP IRQs,
 *	and 32 softint IRQs
 */
#define ICU_LEN		96	/* number of HW IRQs */
#define IRQ_GPP_BASE	64	/* base of GPP IRQs */
#define IRQ_GPP_SUM	(32+24)	/* GPP[7..0] interrupt */	/* XXX */
#define NIRQ		128	/* total # of IRQs (HW + softint) */

#define IMASK_ICU_LO	0
#define IMASK_ICU_HI	1
#define IMASK_ICU_GPP	2
#define IMASK_SOFTINT	3
#define IMASK_WORDSHIFT	5	/* log2(32) */
#define IMASK_BITMASK	~((~0) << IMASK_WORDSHIFT)

#define IRQ_IS_GPP(irq)	(((irq) >= IRQ_GPP_BASE) && ((irq) < ICU_LEN))
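
/*
 * Illustrative mapping, derived from the constants above: hard IRQ 70 is
 * a GPP interrupt since 64 <= 70 < 96; within an imask_t it lives in word
 * 70 >> IMASK_WORDSHIFT == 2 (IMASK_ICU_GPP) at bit 70 & IMASK_BITMASK == 6.
 * Softint IRQs 96..127 likewise map to word 3 (IMASK_SOFTINT).
 */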

/*
 * interrupt mask bit vector
 */
typedef struct {
	u_int32_t bits[4];
} imask_t __attribute__ ((aligned(16)));

static inline void imask_zero(imask_t *);
static inline void imask_zero_v(volatile imask_t *);
static inline void imask_dup_v(imask_t *, const volatile imask_t *);
static inline void imask_and(imask_t *, const imask_t *);
static inline void imask_andnot_v(volatile imask_t *, const imask_t *);
static inline void imask_andnot_icu_vv(volatile imask_t *, const volatile imask_t *);
static inline int imask_empty(const imask_t *);
static inline void imask_orbit(imask_t *, int);
static inline void imask_orbit_v(volatile imask_t *, int);
static inline void imask_clrbit(imask_t *, int);
static inline void imask_clrbit_v(volatile imask_t *, int);
static inline u_int32_t imask_andbit_v(const volatile imask_t *, int);
static inline int imask_test_v(const volatile imask_t *, const imask_t *);

static inline void
imask_zero(imask_t *idp)
{
	idp->bits[IMASK_ICU_LO] = 0;
	idp->bits[IMASK_ICU_HI] = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_zero_v(volatile imask_t *idp)
{
	idp->bits[IMASK_ICU_LO] = 0;
	idp->bits[IMASK_ICU_HI] = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_dup_v(imask_t *idp, const volatile imask_t *isp)
{
	*idp = *isp;
}

static inline void
imask_and(imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO] &= isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI] &= isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_v(volatile imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO] &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI] &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= ~isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_icu_vv(volatile imask_t *idp, const volatile imask_t *isp)
{
	idp->bits[IMASK_ICU_LO] &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI] &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
}

static inline int
imask_empty(const imask_t *isp)
{
	return (! (isp->bits[IMASK_ICU_LO] | isp->bits[IMASK_ICU_HI] |
	    isp->bits[IMASK_ICU_GPP] | isp->bits[IMASK_SOFTINT]));
}

static inline void
imask_orbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline u_int32_t
imask_andbit_v(const volatile imask_t *idp, int bitno)
{
	return idp->bits[bitno>>IMASK_WORDSHIFT] & (1 << (bitno&IMASK_BITMASK));
}

static inline int
imask_test_v(const volatile imask_t *idp, const imask_t *isp)
{
	return ((idp->bits[IMASK_ICU_LO] & isp->bits[IMASK_ICU_LO]) ||
	    (idp->bits[IMASK_ICU_HI] & isp->bits[IMASK_ICU_HI]) ||
	    (idp->bits[IMASK_ICU_GPP] & isp->bits[IMASK_ICU_GPP]) ||
	    (idp->bits[IMASK_SOFTINT] & isp->bits[IMASK_SOFTINT]));
}
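
/*
 * Usage sketch (illustrative; mirrors how the splx()/spllower() inlines
 * below use these helpers): a newly pended IRQ is recorded with
 * imask_orbit_v(&ipending, irq), and imask_test_v(&ipending, &imask[ncpl])
 * decides whether lowering the priority to ncpl uncovers work for
 * intr_dispatch().
 */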

#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
	u_int64_t tcause;
	u_int64_t tcommit;
	u_int64_t tstart;
	u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
	struct ext_intr_hist *histp;
	unsigned int histix;
	u_int64_t cnt;
	u_int64_t sum;
	u_int64_t min;
	u_int64_t max;
	u_int64_t pnd;
	u_int64_t borrowed;
	struct ext_intr_stat *save;
	unsigned long preempted[NIRQ];	/* XXX */
} ext_intr_stat_t __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init __P((void));
extern void ext_intr_stats_cause
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_pend
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_commit __P((imask_t *));
extern void ext_intr_stats_commit_m __P((imask_t *));
extern void ext_intr_stats_commit_irq __P((u_int));
extern u_int64_t ext_intr_stats_pre __P((int));
extern void ext_intr_stats_post __P((int, u_int64_t));

#define EXT_INTR_STATS_INIT()		ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s) ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)	ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)	ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t)		u_int64_t t
#define EXT_INTR_STATS_PRE(i, t)	t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t)	ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)	ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)	ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
	intr_depth_max = (intr_depth > intr_depth_max) ? \
		intr_depth : intr_depth_max

#else	/* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(irq, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif /* EXT_INTR_STATS */
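
/*
 * The EXT_INTR_STATS_* macros are meant to bracket handler invocation in
 * the interrupt dispatcher.  A minimal sketch of an assumed caller (the
 * handler/arg names are placeholders, not taken from the dispatcher):
 *
 *	EXT_INTR_STATS_DECL(tstart);
 *	EXT_INTR_STATS_PRE(irq, tstart);
 *	(*handler)(arg);
 *	EXT_INTR_STATS_POST(irq, tstart);
 */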


#ifdef SPL_STATS
typedef struct spl_hist {
	int level;
	void *addr;
	u_int64_t time;
} spl_hist_t;

extern void spl_stats_init();
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()	spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)	spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif /* SPL_STATS */


void setsoftclock __P((void));
void clearsoftclock __P((void));
void setsoftnet __P((void));
void clearsoftnet __P((void));

void intr_dispatch __P((void));
#ifdef SPL_INLINE
static inline int splraise __P((int));
static inline int spllower __P((int));
static inline void splx __P((int));
#else
extern int splraise __P((int));
extern int spllower __P((int));
extern void splx __P((int));
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static inline void
extintr_restore(register_t omsr)
{
	__asm volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static inline register_t
extintr_enable(void)
{
	register_t omsr;

	__asm volatile("sync;");
	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

	return omsr;
}

static inline register_t
extintr_disable(void)
{
	register_t omsr;

	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
	__asm volatile("isync;");

	return omsr;
}
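
/*
 * Typical pairing (as used by the splraise()/splx()/spllower() inlines
 * below):
 *
 *	register_t omsr = extintr_disable();
 *	... update cpl/ipending with external interrupts masked ...
 *	extintr_restore(omsr);
 */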

#ifdef SPL_INLINE
static inline int
splraise(int ncpl)
{
	int ocpl;
	register_t omsr;

	omsr = extintr_disable();
	ocpl = cpl;
	if (ncpl > cpl) {
		SPL_STATS_LOG(ncpl, 0);
		cpl = ncpl;
		if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
			/* leave external interrupts disabled */
			return (ocpl | IPL_EE);
		}
	}
	extintr_restore(omsr);
	return (ocpl);
}

static inline void
splx(int xcpl)
{
	imask_t *ncplp;
	register_t omsr;
	int ncpl = xcpl & IPL_PRIMASK;

	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	if (ncpl < cpl) {
		cpl = ncpl;
		SPL_STATS_LOG(ncpl, 0);
		if (imask_test_v(&ipending, ncplp))
			intr_dispatch();
	}
	if (xcpl & IPL_EE)
		omsr |= PSL_EE;
	extintr_restore(omsr);
}

static inline int
spllower(int ncpl)
{
	int ocpl;
	imask_t *ncplp;
	register_t omsr;

	ncpl &= IPL_PRIMASK;
	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	ocpl = cpl;
	cpl = ncpl;
	SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
	ext_intr_statp = 0;
#endif
	if (imask_test_v(&ipending, ncplp))
		intr_dispatch();

	if (ncpl < IPL_HIGH)
		omsr |= PSL_EE;
	extintr_restore(omsr);

	return (ocpl);
}
#endif /* SPL_INLINE */
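
/*
 * Illustrative spl usage in a driver (an assumed caller following the
 * standard NetBSD pattern, not part of this interface): raise the
 * priority to block the device's interrupts, touch shared state, then
 * drop back.
 *
 *	int s = splraise(IPL_NET);
 *	... manipulate data shared with the network interrupt handler ...
 *	splx(s);
 */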


/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define SIR_BASE	(NIRQ-32)
#define SIR_SOFTCLOCK	(NIRQ-5)
#define SIR_SOFTNET	(NIRQ-4)
#define SIR_SOFTI2C	(NIRQ-3)
#define SIR_SOFTSERIAL	(NIRQ-2)
#define SIR_HWCLOCK	(NIRQ-1)
#define SIR_RES		~(SIBIT(SIR_SOFTCLOCK)|\
			  SIBIT(SIR_SOFTNET)|\
			  SIBIT(SIR_SOFTI2C)|\
			  SIBIT(SIR_SOFTSERIAL)|\
			  SIBIT(SIR_HWCLOCK))
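
/*
 * With NIRQ == 128 and SIR_BASE == 96 this places the soft IRQs at
 * SIR_SOFTCLOCK == 123, SIR_SOFTNET == 124, SIR_SOFTI2C == 125,
 * SIR_SOFTSERIAL == 126 and SIR_HWCLOCK == 127, i.e. bits 27..31 of the
 * IMASK_SOFTINT word (SIBIT(SIR_SOFTCLOCK) == 1 << 27).
 */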

/*
 * Software interrupt spl's
 *
 * NOTE: splsoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define spllowersoftclock()	spllower(IPL_SOFTCLOCK)

struct intrhand;
extern struct intrhand *softnet_handlers[];
#define schednetisr(an_isr)	softintr_schedule(softnet_handlers[(an_isr)])

void *softintr_establish(int level, void (*fun)(void *), void *arg);
void softintr_disestablish(void *cookie);
void softintr_schedule(void *cookie);
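
/*
 * Soft interrupt usage sketch (illustrative; foo_softintr and sc are
 * placeholder names, not part of this interface; foo_softintr must have
 * type void (*)(void *)):
 *
 *	void *si = softintr_establish(IPL_SOFTNET, foo_softintr, sc);
 *	softintr_schedule(si);		scheduled from the hard handler
 *	softintr_disestablish(si);	torn down at detach time
 */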


/*
 * Miscellaneous
 */
#define spl0()		spllower(IPL_NONE)

typedef int ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static inline int
splraiseipl(ipl_cookie_t icookie)
{

	return splraise(icookie._ipl);
}
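
/*
 * makeiplcookie()/splraiseipl() are the MI-facing way to raise the
 * priority; an illustrative caller:
 *
 *	ipl_cookie_t c = makeiplcookie(IPL_VM);
 *	int s = splraiseipl(c);
 *	... critical section ...
 *	splx(s);
 */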

#include <sys/spl.h>

#define SIBIT(ipl)	(1 << ((ipl) - SIR_BASE))
#if 0
#define setsoftclock()	softintr(SIBIT(SIR_SOFTCLOCK))
#define setsoftnet()	softintr(SIBIT(SIR_SOFTNET))
#define setsoftserial()	softintr(SIBIT(SIR_SOFTSERIAL))
#define setsofti2c()	softintr(SIBIT(SIR_SOFTI2C))
#endif

extern void *softnet_si;
void *intr_establish(int, int, int, int (*)(void *), void *);
void intr_disestablish(void *);
void init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void ext_intr(struct intrframe *);

#if 0
void softserial(void);
#endif
void strayintr(int);

/*
 * defines for indexing intrcnt
 */
#define CNT_IRQ0	0
#define CNT_CLOCK	SIR_HWCLOCK
#define CNT_SOFTCLOCK	SIR_SOFTCLOCK
#define CNT_SOFTNET	SIR_SOFTNET
#define CNT_SOFTSERIAL	SIR_SOFTSERIAL
#define CNT_SOFTI2C	SIR_SOFTI2C

#endif /* !_LOCORE && _KERNEL */

#endif /* _MVPPPC_INTR_H_ */