/*	$NetBSD: marvell_intr.h,v 1.11.24.2 2008/01/09 01:47:50 matt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

/*
 * Interrupt Priority Levels
 */
#define	IPL_NONE	0	/* nothing */
#define	IPL_SOFTCLOCK	1	/* timeouts */
#define	IPL_SOFTBIO	2	/* block I/O */
#define	IPL_SOFTNET	3	/* protocol stacks */
#define	IPL_SOFTSERIAL	4	/* serial */
#define	IPL_VM		12	/* memory allocation */
#define	IPL_SCHED	14	/* clock */
#define	IPL_HIGH	15	/* everything */
#define	NIPL		16
#define IPL_PRIMASK	0xf
#define IPL_EE		0x10	/* enable external interrupts on splx */
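
/*
 * Illustrative example: a value returned by splraise() carries the old
 * priority level in IPL_PRIMASK and may have IPL_EE set above it, e.g.
 * IPL_VM | IPL_EE == 0x1c.  splx() masks the level off with IPL_PRIMASK
 * and re-enables external interrupts when IPL_EE is set (see the spl
 * inlines below).
 */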

/* Interrupt sharing types. */
#define	IST_NONE	0	/* none */
#define	IST_PULSE	1	/* pulsed */
#define	IST_EDGE	2	/* edge-triggered */
#define	IST_LEVEL	3	/* level-triggered */
#define	IST_SOFT	4	/* software-triggered */
#define	IST_CLOCK	5	/* exclusive for clock */
#define	NIST		6

#if !defined(_LOCORE) && defined(_KERNEL)

#define	CLKF_BASEPRI(frame)	((frame)->pri == IPL_NONE)

/*
 * we support 128 IRQs:
 *	96 (ICU_LEN) hard interrupt IRQs:
 *		- 64 Main Cause IRQs,
 *		- 32 GPP IRQs,
 *	and 32 softint IRQs
 */
#define ICU_LEN		96	/* number of HW IRQs */
#define IRQ_GPP_BASE	64	/* base of GPP IRQs */
#define IRQ_GPP_SUM	(32+24) /* GPP[7..0] interrupt */	/* XXX */
#define NIRQ		128	/* total # of IRQs (HW + soft) */

#define IMASK_ICU_LO	0
#define IMASK_ICU_HI	1
#define IMASK_ICU_GPP	2
#define IMASK_SOFTINT	3
#define IMASK_WORDSHIFT 5	/* log2(32) */
#define IMASK_BITMASK	~((~0) << IMASK_WORDSHIFT)

#define IRQ_IS_GPP(irq) (((irq) >= IRQ_GPP_BASE) && ((irq) < ICU_LEN))

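/*
 * Worked example of the mask layout (illustrative): hard IRQ 70 is a GPP
 * interrupt (IRQ_GPP_BASE + 6); it lives in word 70 >> IMASK_WORDSHIFT == 2
 * (IMASK_ICU_GPP), bit 70 & IMASK_BITMASK == 6.  Softint IRQs
 * (>= NIRQ - 32 == 96) land in word 3 (IMASK_SOFTINT).
 */
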
/*
 * interrupt mask bit vector
 */
typedef struct {
	u_int32_t bits[4];
} imask_t __attribute__ ((aligned(16)));

static inline void imask_zero(imask_t *);
static inline void imask_zero_v(volatile imask_t *);
static inline void imask_dup_v(imask_t *, const volatile imask_t *);
static inline void imask_and(imask_t *, const imask_t *);
static inline void imask_andnot_v(volatile imask_t *, const imask_t *);
static inline void imask_andnot_icu_vv(volatile imask_t *, const volatile imask_t *);
static inline int imask_empty(const imask_t *);
static inline void imask_orbit(imask_t *, int);
static inline void imask_orbit_v(volatile imask_t *, int);
static inline void imask_clrbit(imask_t *, int);
static inline void imask_clrbit_v(volatile imask_t *, int);
static inline u_int32_t imask_andbit_v(const volatile imask_t *, int);
static inline int imask_test_v(const volatile imask_t *, const imask_t *);

static inline void
imask_zero(imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_zero_v(volatile imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_dup_v(imask_t *idp, const volatile imask_t *isp)
{
	*idp = *isp;
}

static inline void
imask_and(imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_v(volatile imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= ~isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_icu_vv(volatile imask_t *idp, const volatile imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
}

static inline int
imask_empty(const imask_t *isp)
{
	return (! (isp->bits[IMASK_ICU_LO]  | isp->bits[IMASK_ICU_HI] |
		   isp->bits[IMASK_ICU_GPP] | isp->bits[IMASK_SOFTINT]));
}

static inline void
imask_orbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline u_int32_t
imask_andbit_v(const volatile imask_t *idp, int bitno)
{
	return idp->bits[bitno>>IMASK_WORDSHIFT] & (1 << (bitno&IMASK_BITMASK));
}

static inline int
imask_test_v(const volatile imask_t *idp, const imask_t *isp)
{
	return ((idp->bits[IMASK_ICU_LO]  & isp->bits[IMASK_ICU_LO])  ||
		(idp->bits[IMASK_ICU_HI]  & isp->bits[IMASK_ICU_HI])  ||
		(idp->bits[IMASK_ICU_GPP] & isp->bits[IMASK_ICU_GPP]) ||
		(idp->bits[IMASK_SOFTINT] & isp->bits[IMASK_SOFTINT]));
}

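/*
 * Usage sketch (illustrative only; `irq' and `ncpl' are stand-ins): this is
 * the pattern splx()/spllower() below use -- mark an IRQ pending, then test
 * whether anything left unmasked at the new level still needs service:
 *
 *	imask_orbit_v(&ipending, irq);
 *	...
 *	if (imask_test_v(&ipending, &imask[ncpl]))
 *		intr_dispatch();
 */
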
#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
	u_int64_t tcause;
	u_int64_t tcommit;
	u_int64_t tstart;
	u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
	struct ext_intr_hist *histp;
	unsigned int histix;
	u_int64_t cnt;
	u_int64_t sum;
	u_int64_t min;
	u_int64_t max;
	u_int64_t pnd;
	u_int64_t borrowed;
	struct ext_intr_stat *save;
	unsigned long preempted[NIRQ];	/* XXX */
} ext_intr_stat_t __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init __P((void));
extern void ext_intr_stats_cause
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_pend
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_commit __P((imask_t *));
extern void ext_intr_stats_commit_m __P((imask_t *));
extern void ext_intr_stats_commit_irq __P((u_int));
extern u_int64_t ext_intr_stats_pre __P((int));
extern void ext_intr_stats_post __P((int, u_int64_t));

#define EXT_INTR_STATS_INIT() ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)  ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m) ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i) ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t) u_int64_t t
#define EXT_INTR_STATS_PRE(i, t) t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t) ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s) ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i) ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
		intr_depth_max = (intr_depth > intr_depth_max) ? \
			intr_depth : intr_depth_max

#else /* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(irq, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif	/* EXT_INTR_STATS */

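/*
 * Instrumentation sketch (illustrative; `irq' is a stand-in and the
 * `ih_fun'/`ih_arg' member names are hypothetical, struct intrhand is only
 * forward-declared below): timing one handler invocation with the macros
 * above, which compile to nothing when EXT_INTR_STATS is not defined:
 *
 *	EXT_INTR_STATS_DECL(tstart);
 *	EXT_INTR_STATS_PRE(irq, tstart);
 *	(*ih->ih_fun)(ih->ih_arg);
 *	EXT_INTR_STATS_POST(irq, tstart);
 */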

#ifdef SPL_STATS
typedef struct spl_hist {
	int level;
	void *addr;
	u_int64_t time;
} spl_hist_t;

extern void spl_stats_init();
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()	spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)	spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif	/* SPL_STATS */


void intr_dispatch __P((void));
#ifdef SPL_INLINE
static inline int splraise __P((int));
static inline int spllower __P((int));
static inline void splx __P((int));
#else
extern int splraise __P((int));
extern int spllower __P((int));
extern void splx __P((int));
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static inline void
extintr_restore(register_t omsr)
{
	__asm volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static inline register_t
extintr_enable(void)
{
	register_t omsr;

	__asm volatile("sync;");
	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

	return omsr;
}

static inline register_t
extintr_disable(void)
{
	register_t omsr;

	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
	__asm volatile("isync;");

	return omsr;
}
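
/*
 * These are used in the usual save/disable/restore pattern, as in the spl
 * inlines below:
 *
 *	omsr = extintr_disable();
 *	... touch cpl and ipending ...
 *	extintr_restore(omsr);
 */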

#ifdef SPL_INLINE
static inline int
splraise(int ncpl)
{
	int ocpl;
	register_t omsr;

	omsr = extintr_disable();
	ocpl = cpl;
	if (ncpl > cpl) {
		SPL_STATS_LOG(ncpl, 0);
		cpl = ncpl;
		if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
			/* leave external interrupts disabled */
			return (ocpl | IPL_EE);
		}
	}
	extintr_restore(omsr);
	return (ocpl);
}

static inline void
splx(int xcpl)
{
	imask_t *ncplp;
	register_t omsr;
	int ncpl = xcpl & IPL_PRIMASK;

	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	if (ncpl < cpl) {
		cpl = ncpl;
		SPL_STATS_LOG(ncpl, 0);
		if (imask_test_v(&ipending, ncplp))
			intr_dispatch();
	}
	if (xcpl & IPL_EE)
		omsr |= PSL_EE;
	extintr_restore(omsr);
}

static inline int
spllower(int ncpl)
{
	int ocpl;
	imask_t *ncplp;
	register_t omsr;

	ncpl &= IPL_PRIMASK;
	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	ocpl = cpl;
	cpl = ncpl;
	SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
	ext_intr_statp = 0;
#endif
	if (imask_test_v(&ipending, ncplp))
		intr_dispatch();

	if (ncpl < IPL_HIGH)
		omsr |= PSL_EE;
	extintr_restore(omsr);

	return (ocpl);
}
#endif	/* SPL_INLINE */
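
/*
 * Typical protection idiom (sketch): raise the priority level around a short
 * critical section, then drop back with splx(), which also restores PSL_EE
 * via the IPL_EE bit:
 *
 *	int s = splraise(IPL_VM);
 *	... section protected from interrupts at or below IPL_VM ...
 *	splx(s);
 */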


/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define SIR_BASE	(NIRQ-32)
#define SIXBIT(ipl)	((ipl) - SIR_BASE) /* XXX renovate later */
#define SIR_SOFTCLOCK	(NIRQ-5)
#define SIR_CLOCK	SIXBIT(SIR_SOFTCLOCK) /* XXX renovate later */
#define SIR_SOFTNET	(NIRQ-4)
#define SIR_SOFTBIO	(NIRQ-3)
#define SIR_SOFTSERIAL	(NIRQ-2)
#define SIR_HWCLOCK	(NIRQ-1)
#define SPL_CLOCK	SIXBIT(SIR_HWCLOCK) /* XXX renovate later */
#define SIR_RES		~(SIBIT(SIR_SOFTCLOCK)|\
			  SIBIT(SIR_SOFTNET)|\
			  SIBIT(SIR_SOFTBIO)|\
			  SIBIT(SIR_SOFTSERIAL)|\
			  SIBIT(SIR_HWCLOCK))
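
/*
 * With NIRQ == 128 and SIR_BASE == 96, the soft IRQ numbers work out to
 * SIR_SOFTCLOCK == 123, SIR_SOFTNET == 124, SIR_SOFTBIO == 125,
 * SIR_SOFTSERIAL == 126 and SIR_HWCLOCK == 127, so e.g. SIBIT(SIR_HWCLOCK)
 * is bit 31 of the IMASK_SOFTINT word.
 */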

struct intrhand;

/*
 * Miscellaneous
 */
#define	spl0()		spllower(IPL_NONE)

typedef int ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static inline int
splraiseipl(ipl_cookie_t icookie)
{

	return splraise(icookie._ipl);
}
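
/*
 * Usage sketch: callers can precompute the cookie once and raise the level
 * later, pairing with splx() as usual:
 *
 *	ipl_cookie_t icookie = makeiplcookie(IPL_VM);
 *	...
 *	s = splraiseipl(icookie);
 *	...
 *	splx(s);
 */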

#include <sys/spl.h>

#define SIBIT(ipl)	(1 << ((ipl) - SIR_BASE))

void	*intr_establish(int, int, int, int (*)(void *), void *);
void	intr_disestablish(void *);
void	init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void	ext_intr(struct intrframe *);
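
/*
 * Registration sketch (hypothetical driver; the argument order -- irq,
 * IST_* type, IPL_* level, handler, handler argument -- is an assumption,
 * check the implementation before relying on it):
 *
 *	void *ih = intr_establish(irq, IST_LEVEL, IPL_VM, mydrv_intr, sc);
 *	...
 *	intr_disestablish(ih);
 */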

/*
 * The following are needed to compile until this port is properly
 * converted to ppcoea-rennovation.
 */
void genppc_cpu_configure(void);

void	strayintr(int);

/*
 * defines for indexing intrcnt
 */
#define CNT_IRQ0	0
#define CNT_CLOCK	SIR_HWCLOCK
#define CNT_SOFTCLOCK	SIR_SOFTCLOCK
#define CNT_SOFTNET	SIR_SOFTNET
#define CNT_SOFTSERIAL	SIR_SOFTSERIAL
#define CNT_SOFTBIO	SIR_SOFTBIO

#endif /* !_LOCORE && _KERNEL */

#endif /* _MVPPPC_INTR_H_ */