/*	$NetBSD: marvell_intr.h,v 1.14.2.2 2009/05/04 08:11:44 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

#include <powerpc/psl.h>
#include <powerpc/frame.h>

/*
 * Interrupt Priority Levels
 */
#define	IPL_NONE	0	/* nothing */
#define	IPL_SOFTCLOCK	1	/* timeouts */
#define	IPL_SOFTBIO	2	/* block I/O */
#define	IPL_SOFTNET	3	/* protocol stacks */
#define	IPL_SOFTSERIAL	4	/* serial */
#define	IPL_VM		12	/* memory allocation */
#define	IPL_SCHED	14	/* clock */
#define	IPL_HIGH	15	/* everything */
#define	NIPL		16
#define IPL_PRIMASK	0xf
#define IPL_EE		0x10	/* enable external interrupts on splx */

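/*
 * An spl value returned by splraise() packs the previous priority in the
 * low four bits (IPL_PRIMASK) and may have IPL_EE set to ask splx() to
 * re-enable external interrupts.  A minimal sketch of how a caller consumes
 * that encoding (illustrative only; "s" is a hypothetical local):
 *
 *	int s = splraise(IPL_HIGH);	// may come back as old_ipl | IPL_EE
 *	...critical section...
 *	splx(s);			// restores priority, honours IPL_EE
 */
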
/* Interrupt sharing types. */
#define	IST_NONE	0	/* none */
#define	IST_PULSE	1	/* pulsed */
#define	IST_EDGE	2	/* edge-triggered */
#define	IST_LEVEL	3	/* level-triggered */
#define	IST_SOFT	4	/* software-triggered */
#define	IST_CLOCK	5	/* exclusive for clock */
#define	NIST		6

#if !defined(_LOCORE) && defined(_KERNEL)

#define	CLKF_BASEPRI(frame)	((frame)->pri == IPL_NONE)

/*
 * We support 128 IRQs:
 *	96 (ICU_LEN) hard interrupt IRQs:
 *		- 64 Main Cause IRQs,
 *		- 32 GPP IRQs,
 *	and 32 softint IRQs
 */
#define ICU_LEN		96	/* number of HW IRQs */
#define IRQ_GPP_BASE	64	/* base of GPP IRQs */
#define IRQ_GPP_SUM	(32+24) /* GPP[7..0] interrupt */	/* XXX */
#define NIRQ		128	/* total # of IRQs (HW + soft) */

#define IMASK_ICU_LO	0
#define IMASK_ICU_HI	1
#define IMASK_ICU_GPP	2
#define IMASK_SOFTINT	3
#define IMASK_WORDSHIFT 5	/* log2(32) */
#define IMASK_BITMASK	(~((~0) << IMASK_WORDSHIFT))

#define IRQ_IS_GPP(irq) (((irq) >= IRQ_GPP_BASE) && ((irq) < ICU_LEN))

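/*
 * The imask_*() helpers below treat an IRQ number as an index into a
 * 4 x 32-bit vector: word = irq >> IMASK_WORDSHIFT, bit = irq & IMASK_BITMASK.
 * For example (illustrative values only): IRQ 70 is a GPP interrupt
 * (IRQ_IS_GPP(70) is true) and lands in word 2 (IMASK_ICU_GPP), bit 6;
 * softint IRQ 100 lands in word 3 (IMASK_SOFTINT), bit 4.
 */
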
/*
 * interrupt mask bit vector
 */
typedef struct {
	u_int32_t bits[4];
} imask_t __attribute__ ((aligned(16)));

static inline void imask_zero(imask_t *);
static inline void imask_zero_v(volatile imask_t *);
static inline void imask_dup_v(imask_t *, const volatile imask_t *);
static inline void imask_and(imask_t *, const imask_t *);
static inline void imask_andnot_v(volatile imask_t *, const imask_t *);
static inline void imask_andnot_icu_vv(volatile imask_t *, const volatile imask_t *);
static inline int imask_empty(const imask_t *);
static inline void imask_orbit(imask_t *, int);
static inline void imask_orbit_v(volatile imask_t *, int);
static inline void imask_clrbit(imask_t *, int);
static inline void imask_clrbit_v(volatile imask_t *, int);
static inline u_int32_t imask_andbit_v(const volatile imask_t *, int);
static inline int imask_test_v(const volatile imask_t *, const imask_t *);

static inline void
imask_zero(imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_zero_v(volatile imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_dup_v(imask_t *idp, const volatile imask_t *isp)
{
	*idp = *isp;
}

static inline void
imask_and(imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_v(volatile imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= ~isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_icu_vv(volatile imask_t *idp, const volatile imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
}

static inline int
imask_empty(const imask_t *isp)
{
	return (! (isp->bits[IMASK_ICU_LO] | isp->bits[IMASK_ICU_HI] |
		   isp->bits[IMASK_ICU_GPP] | isp->bits[IMASK_SOFTINT]));
}

static inline void
imask_orbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline u_int32_t
imask_andbit_v(const volatile imask_t *idp, int bitno)
{
	return idp->bits[bitno>>IMASK_WORDSHIFT] & (1 << (bitno&IMASK_BITMASK));
}

static inline int
imask_test_v(const volatile imask_t *idp, const imask_t *isp)
{
	return ((idp->bits[IMASK_ICU_LO]  & isp->bits[IMASK_ICU_LO]) ||
		(idp->bits[IMASK_ICU_HI]  & isp->bits[IMASK_ICU_HI]) ||
		(idp->bits[IMASK_ICU_GPP] & isp->bits[IMASK_ICU_GPP]) ||
		(idp->bits[IMASK_SOFTINT] & isp->bits[IMASK_SOFTINT]));
}

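/*
 * Usage sketch (illustrative only, mirroring what splx()/spllower() below
 * do): record a pending IRQ in "ipending" and test whether anything pending
 * intersects the mask for a given priority level.  "irq" and "ncpl" are
 * hypothetical locals.
 *
 *	imask_orbit_v(&ipending, irq);		// mark "irq" pending
 *	if (imask_test_v(&ipending, &imask[ncpl]))
 *		intr_dispatch();		// deliver what the level allows
 */
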
#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
	u_int64_t tcause;
	u_int64_t tcommit;
	u_int64_t tstart;
	u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
	struct ext_intr_hist *histp;
	unsigned int histix;
	u_int64_t cnt;
	u_int64_t sum;
	u_int64_t min;
	u_int64_t max;
	u_int64_t pnd;
	u_int64_t borrowed;
	struct ext_intr_stat *save;
	unsigned long preempted[NIRQ];	/* XXX */
} ext_intr_stat_t __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init(void);
extern void ext_intr_stats_cause(u_int32_t, u_int32_t, u_int32_t, u_int32_t);
extern void ext_intr_stats_pend(u_int32_t, u_int32_t, u_int32_t, u_int32_t);
extern void ext_intr_stats_commit(imask_t *);
extern void ext_intr_stats_commit_m(imask_t *);
extern void ext_intr_stats_commit_irq(u_int);
extern u_int64_t ext_intr_stats_pre(int);
extern void ext_intr_stats_post(int, u_int64_t);

#define EXT_INTR_STATS_INIT() ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s) ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m) ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i) ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t) u_int64_t t
#define EXT_INTR_STATS_PRE(i, t) t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t) ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s) ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i) ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
		 intr_depth_max = (intr_depth > intr_depth_max) ? \
			 intr_depth : intr_depth_max

#else /* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(irq, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif	/* EXT_INTR_STATS */
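
/*
 * A hedged sketch of how a dispatcher might wrap a handler with the timing
 * macros above (illustrative only; "irq" and the handler invocation through
 * "ih" are hypothetical, not taken from this header):
 *
 *	EXT_INTR_STATS_DECL(t);
 *	EXT_INTR_STATS_PRE(irq, t);	// timestamp before the handler runs
 *	(*ih->ih_fun)(ih->ih_arg);	// hypothetical handler call
 *	EXT_INTR_STATS_POST(irq, t);	// presumably folds into cnt/sum/min/max
 */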


#ifdef SPL_STATS
typedef struct spl_hist {
	int level;
	void *addr;
	u_int64_t time;
} spl_hist_t;

extern void spl_stats_init(void);
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()	spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)	spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif	/* SPL_STATS */


void intr_dispatch(void);
#ifdef SPL_INLINE
static inline int splraise(int);
static inline int spllower(int);
static inline void splx(int);
#else
extern int splraise(int);
extern int spllower(int);
extern void splx(int);
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static inline void
extintr_restore(register_t omsr)
{
	__asm volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static inline register_t
extintr_enable(void)
{
	register_t omsr;

	__asm volatile("sync;");
	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

	return omsr;
}

static inline register_t
extintr_disable(void)
{
	register_t omsr;

	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
	__asm volatile("isync;");

	return omsr;
}

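/*
 * Typical pairing (illustrative sketch only): save the MSR, run with
 * external interrupts hard-disabled, then restore whatever PSL_EE state the
 * caller had.  "omsr" is a hypothetical local.
 *
 *	register_t omsr = extintr_disable();
 *	...touch cpl/ipending atomically with respect to interrupts...
 *	extintr_restore(omsr);
 */
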
#ifdef SPL_INLINE
static inline int
splraise(int ncpl)
{
	int ocpl;
	register_t omsr;

	omsr = extintr_disable();
	ocpl = cpl;
	if (ncpl > cpl) {
		SPL_STATS_LOG(ncpl, 0);
		cpl = ncpl;
		if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
			/* leave external interrupts disabled */
			return (ocpl | IPL_EE);
		}
	}
	extintr_restore(omsr);
	return (ocpl);
}

static inline void
splx(int xcpl)
{
	imask_t *ncplp;
	register_t omsr;
	int ncpl = xcpl & IPL_PRIMASK;

	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	if (ncpl < cpl) {
		cpl = ncpl;
		SPL_STATS_LOG(ncpl, 0);
		if (imask_test_v(&ipending, ncplp))
			intr_dispatch();
	}
	if (xcpl & IPL_EE)
		omsr |= PSL_EE;
	extintr_restore(omsr);
}

static inline int
spllower(int ncpl)
{
	int ocpl;
	imask_t *ncplp;
	register_t omsr;

	ncpl &= IPL_PRIMASK;
	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	ocpl = cpl;
	cpl = ncpl;
	SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
	ext_intr_statp = 0;
#endif
	if (imask_test_v(&ipending, ncplp))
		intr_dispatch();

	if (ncpl < IPL_HIGH)
		omsr |= PSL_EE;
	extintr_restore(omsr);

	return (ocpl);
}
#endif	/* SPL_INLINE */
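
/*
 * Usage sketch (illustrative only): block interrupts at or below IPL_VM
 * around a critical section, then drop back to the previous level.
 *
 *	int s = splraise(IPL_VM);
 *	...manipulate data shared with interrupt handlers...
 *	splx(s);
 */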


/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define SIR_BASE	(NIRQ-32)
#define SIXBIT(ipl)	((ipl) - SIR_BASE) /* XXX renovate later */
#define SIR_SOFTCLOCK	(NIRQ-5)
#define SIR_CLOCK	SIXBIT(SIR_SOFTCLOCK) /* XXX renovate later */
#define SIR_SOFTNET	(NIRQ-4)
#define SIR_SOFTBIO	(NIRQ-3)
#define SIR_SOFTSERIAL	(NIRQ-2)
#define SIR_HWCLOCK	(NIRQ-1)
#define SPL_CLOCK	SIXBIT(SIR_HWCLOCK) /* XXX renovate later */
#define SIR_RES		~(SIBIT(SIR_SOFTCLOCK)|\
			  SIBIT(SIR_SOFTNET)|\
			  SIBIT(SIR_SOFTBIO)|\
			  SIBIT(SIR_SOFTSERIAL)|\
			  SIBIT(SIR_HWCLOCK))

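/*
 * With NIRQ == 128 and SIR_BASE == 96 the soft IRQs occupy bits 0..31 of
 * the IMASK_SOFTINT word; e.g. SIR_SOFTCLOCK is IRQ 123, so
 * SIBIT(SIR_SOFTCLOCK) (SIBIT is defined after <sys/spl.h> below) is 1 << 27.
 */
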
struct intrhand;

/*
 * Miscellaneous
 */
#define	spl0()		spllower(IPL_NONE)

typedef int ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static inline int
splraiseipl(ipl_cookie_t icookie)
{

	return splraise(icookie._ipl);
}
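
/*
 * Callers that hold a pre-made cookie raise the priority through it rather
 * than through a raw IPL value.  A minimal sketch (hypothetical locals):
 *
 *	ipl_cookie_t ic = makeiplcookie(IPL_VM);
 *	int s = splraiseipl(ic);
 *	...critical section...
 *	splx(s);
 */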

#include <sys/spl.h>

#define SIBIT(ipl)	(1 << ((ipl) - SIR_BASE))

void	*intr_establish(int, int, int, int (*)(void *), void *);
void	intr_disestablish(void *);
void	init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void	ext_intr(struct intrframe *);
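
/*
 * A hedged sketch of hooking a handler.  The argument order (irq, interrupt
 * type, priority level, handler, argument) is an assumption based on the
 * prototype above; "my_handler" and "sc" are hypothetical.
 *
 *	void *ih = intr_establish(IRQ_GPP_BASE + 3, IST_LEVEL, IPL_VM,
 *	    my_handler, sc);
 *	if (ih == NULL)
 *		panic("intr_establish failed");
 *	...
 *	intr_disestablish(ih);
 */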

/* The following are needed to compile until this port is properly
 * converted to ppcoea-renovation.
 */
void genppc_cpu_configure(void);

void	strayintr(int);

/*
 * defines for indexing intrcnt
 */
#define CNT_IRQ0	0
#define CNT_CLOCK	SIR_HWCLOCK
#define CNT_SOFTCLOCK	SIR_SOFTCLOCK
#define CNT_SOFTNET	SIR_SOFTNET
#define CNT_SOFTSERIAL	SIR_SOFTSERIAL
#define CNT_SOFTBIO	SIR_SOFTBIO

#endif /* !_LOCORE && _KERNEL */

#endif /* _MVPPPC_INTR_H_ */