/*	$NetBSD: marvell_intr.h,v 1.12 2007/10/17 19:56:42 garbled Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MVPPPC_INTR_H_
#define _MVPPPC_INTR_H_

/*
 * Interrupt Priority Levels
 */
#define	IPL_NONE	0	/* nothing */
#define	IPL_SOFTCLOCK	1	/* timeouts */
#define	IPL_SOFTNET	2	/* protocol stacks */
#define	IPL_BIO		3	/* block I/O */
#define	IPL_NET		4	/* network */
#define IPL_NCP		5	/* network processors */
#define IPL_SOFTI2C	6	/* i2c */
#define	IPL_SOFTSERIAL	7	/* serial */
#define	IPL_TTY		8	/* terminal */
#define	IPL_LPT		IPL_TTY
#define IPL_AUDIO	9	/* boom box */
#define IPL_EJECT	10	/* card eject */
#define IPL_GTERR	10	/* GT-64260 errors */
#define	IPL_I2C		11	/* i2c */
#define	IPL_VM		12	/* memory allocation */
#define	IPL_SERIAL	13	/* serial */
#define	IPL_CLOCK	14	/* clock */
#define	IPL_STATCLOCK	IPL_CLOCK
#define	IPL_SCHED	14	/* scheduler */
#define	IPL_LOCK	14	/* same as high for now */
#define	IPL_HIGH	15	/* everything */
#define	NIPL		16
#define IPL_PRIMASK	0xf
#define IPL_EE		0x10	/* enable external interrupts on splx */

/* Interrupt sharing types. */
#define	IST_NONE	0	/* none */
#define	IST_PULSE	1	/* pulsed */
#define	IST_EDGE	2	/* edge-triggered */
#define	IST_LEVEL	3	/* level-triggered */
#define	IST_SOFT	4	/* software-triggered */
#define	IST_CLOCK	5	/* exclusive for clock */
#define	NIST		6

#if !defined(_LOCORE) && defined(_KERNEL)

#define	CLKF_BASEPRI(frame)	((frame)->pri == IPL_NONE)

/*
 * we support 128 IRQs:
 *	96 (ICU_LEN) hard interrupt IRQs:
 *		- 64 Main Cause IRQs,
 *		- 32 GPP IRQs,
 *	and 32 softint IRQs
 */
#define ICU_LEN		96	/* number of HW IRQs */
#define IRQ_GPP_BASE	64	/* base of GPP IRQs */
#define IRQ_GPP_SUM	(32+24) /* GPP[7..0] interrupt */	/* XXX */
#define NIRQ		128	/* total # of IRQs (hard + soft) */

#define IMASK_ICU_LO	0
#define IMASK_ICU_HI	1
#define IMASK_ICU_GPP	2
#define IMASK_SOFTINT	3
#define IMASK_WORDSHIFT 5	/* log2(32) */
#define IMASK_BITMASK	~((~0) << IMASK_WORDSHIFT)

#define IRQ_IS_GPP(irq) ((irq >= IRQ_GPP_BASE) && (irq < ICU_LEN))

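/*
 * Illustrative sketch (not part of this interface): how an IRQ number is
 * expected to map onto the imask_t bit vector defined below.
 * IMASK_WORDSHIFT selects the 32-bit word and IMASK_BITMASK the bit
 * within it, so main cause IRQs 0..63 land in words 0..1, GPP IRQs
 * 64..95 in word 2 and the 32 softint IRQs in word 3.  The helper name
 * is hypothetical and used only for illustration.
 */
#if 0	/* example only */
static inline void
imask_example_mark_irq(u_int32_t *bits, int irq)
{
	int word = irq >> IMASK_WORDSHIFT;	/* irq / 32 */
	int bit  = irq & IMASK_BITMASK;		/* irq % 32 */

	bits[word] |= 1 << bit;
}
#endif	/* example only */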
/*
 * interrupt mask bit vector
 */
typedef struct {
	u_int32_t bits[4];
} imask_t __attribute__ ((aligned(16)));

static inline void imask_zero(imask_t *);
static inline void imask_zero_v(volatile imask_t *);
static inline void imask_dup_v(imask_t *, const volatile imask_t *);
static inline void imask_and(imask_t *, const imask_t *);
static inline void imask_andnot_v(volatile imask_t *, const imask_t *);
static inline void imask_andnot_icu_vv(volatile imask_t *, const volatile imask_t *);
static inline int imask_empty(const imask_t *);
static inline void imask_orbit(imask_t *, int);
static inline void imask_orbit_v(volatile imask_t *, int);
static inline void imask_clrbit(imask_t *, int);
static inline void imask_clrbit_v(volatile imask_t *, int);
static inline u_int32_t imask_andbit_v(const volatile imask_t *, int);
static inline int imask_test_v(const volatile imask_t *, const imask_t *);

static inline void
imask_zero(imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_zero_v(volatile imask_t *idp)
{
	idp->bits[IMASK_ICU_LO]  = 0;
	idp->bits[IMASK_ICU_HI]  = 0;
	idp->bits[IMASK_ICU_GPP] = 0;
	idp->bits[IMASK_SOFTINT] = 0;
}

static inline void
imask_dup_v(imask_t *idp, const volatile imask_t *isp)
{
	*idp = *isp;
}

static inline void
imask_and(imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_v(volatile imask_t *idp, const imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
	idp->bits[IMASK_SOFTINT] &= ~isp->bits[IMASK_SOFTINT];
}

static inline void
imask_andnot_icu_vv(volatile imask_t *idp, const volatile imask_t *isp)
{
	idp->bits[IMASK_ICU_LO]  &= ~isp->bits[IMASK_ICU_LO];
	idp->bits[IMASK_ICU_HI]  &= ~isp->bits[IMASK_ICU_HI];
	idp->bits[IMASK_ICU_GPP] &= ~isp->bits[IMASK_ICU_GPP];
}

static inline int
imask_empty(const imask_t *isp)
{
	return (! (isp->bits[IMASK_ICU_LO] | isp->bits[IMASK_ICU_HI] |
		   isp->bits[IMASK_ICU_GPP] | isp->bits[IMASK_SOFTINT]));
}

static inline void
imask_orbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_orbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] |= (1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit(imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline void
imask_clrbit_v(volatile imask_t *idp, int bitno)
{
	idp->bits[bitno>>IMASK_WORDSHIFT] &= ~(1 << (bitno&IMASK_BITMASK));
}

static inline u_int32_t
imask_andbit_v(const volatile imask_t *idp, int bitno)
{
	return idp->bits[bitno>>IMASK_WORDSHIFT] & (1 << (bitno&IMASK_BITMASK));
}

static inline int
imask_test_v(const volatile imask_t *idp, const imask_t *isp)
{
	return ((idp->bits[IMASK_ICU_LO]  & isp->bits[IMASK_ICU_LO]) ||
		(idp->bits[IMASK_ICU_HI]  & isp->bits[IMASK_ICU_HI]) ||
		(idp->bits[IMASK_ICU_GPP] & isp->bits[IMASK_ICU_GPP]) ||
		(idp->bits[IMASK_SOFTINT] & isp->bits[IMASK_SOFTINT]));
}

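/*
 * Illustrative sketch of how the imask_*() helpers above compose: build a
 * single-bit mask and test it against a pending set.  The helper name is
 * hypothetical; the real users are the spl code below, which tests
 * ipending against the per-IPL masks in imask[].
 */
#if 0	/* example only */
static inline int
imask_example_pending(const volatile imask_t *pend, int irq)
{
	imask_t m;

	imask_zero(&m);			/* start with an empty mask */
	imask_orbit(&m, irq);		/* set the bit for this IRQ */
	return imask_test_v(pend, &m);	/* any overlap with pending set? */
}
#endif	/* example only */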
#ifdef EXT_INTR_STATS
/*
 * ISR timing stats
 */

typedef struct ext_intr_hist {
	u_int64_t tcause;
	u_int64_t tcommit;
	u_int64_t tstart;
	u_int64_t tfin;
} ext_intr_hist_t __attribute__ ((aligned(32)));

typedef struct ext_intr_stat {
	struct ext_intr_hist *histp;
	unsigned int histix;
	u_int64_t cnt;
	u_int64_t sum;
	u_int64_t min;
	u_int64_t max;
	u_int64_t pnd;
	u_int64_t borrowed;
	struct ext_intr_stat *save;
	unsigned long preempted[NIRQ];	/* XXX */
} ext_intr_stat_t __attribute__ ((aligned(32)));

extern int intr_depth_max;
extern int ext_intr_stats_enb;
extern ext_intr_stat_t ext_intr_stats[];
extern ext_intr_stat_t *ext_intr_statp;

extern void ext_intr_stats_init __P((void));
extern void ext_intr_stats_cause
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_pend
	__P((u_int32_t, u_int32_t, u_int32_t, u_int32_t));
extern void ext_intr_stats_commit __P((imask_t *));
extern void ext_intr_stats_commit_m __P((imask_t *));
extern void ext_intr_stats_commit_irq __P((u_int));
extern u_int64_t ext_intr_stats_pre  __P((int));
extern void ext_intr_stats_post __P((int, u_int64_t));

#define EXT_INTR_STATS_INIT() ext_intr_stats_init()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)  ext_intr_stats_cause(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m) ext_intr_stats_commit_m(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i) ext_intr_stats_commit_irq(i)
#define EXT_INTR_STATS_DECL(t) u_int64_t t
#define EXT_INTR_STATS_PRE(i, t) t = ext_intr_stats_pre(i)
#define EXT_INTR_STATS_POST(i, t) ext_intr_stats_post(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s) ext_intr_stats_pend(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i) ext_intr_stats[i].pnd++
#define EXT_INTR_STATS_DEPTH() \
		 intr_depth_max = (intr_depth > intr_depth_max) ? \
			 intr_depth : intr_depth_max

#else /* EXT_INTR_STATS */

#define EXT_INTR_STATS_INIT()
#define EXT_INTR_STATS_CAUSE(l, h, g, s)
#define EXT_INTR_STATS_COMMIT_M(m)
#define EXT_INTR_STATS_COMMIT_IRQ(i)
#define EXT_INTR_STATS_DECL(t)
#define EXT_INTR_STATS_PRE(irq, t)
#define EXT_INTR_STATS_POST(i, t)
#define EXT_INTR_STATS_PEND(l, h, g, s)
#define EXT_INTR_STATS_PEND_IRQ(i)
#define EXT_INTR_STATS_DEPTH()

#endif	/* EXT_INTR_STATS */
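/*
 * Illustrative sketch of how the EXT_INTR_STATS_* macros are meant to
 * bracket a handler invocation in the dispatcher, so the instrumentation
 * compiles away entirely when EXT_INTR_STATS is not defined.  The handler
 * "ih_fun", its argument "ih_arg" and the variable names are hypothetical.
 */
#if 0	/* example only */
	{
		EXT_INTR_STATS_DECL(tstart);

		EXT_INTR_STATS_PRE(irq, tstart);	/* timestamp entry */
		(*ih_fun)(ih_arg);			/* run the ISR */
		EXT_INTR_STATS_POST(irq, tstart);	/* account the time */
	}
#endif	/* example only */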


#ifdef SPL_STATS
typedef struct spl_hist {
	int level;
	void *addr;
	u_int64_t time;
} spl_hist_t;

extern void spl_stats_init();
extern void spl_stats_log();
extern unsigned int spl_stats_enb;

#define SPL_STATS_INIT()	spl_stats_init()
#define SPL_STATS_LOG(ipl, cc)	spl_stats_log((ipl), (cc))

#else

#define SPL_STATS_INIT()
#define SPL_STATS_LOG(ipl, cc)

#endif	/* SPL_STATS */


void setsoftclock __P((void));
void clearsoftclock __P((void));
void setsoftnet   __P((void));
void clearsoftnet __P((void));

void intr_dispatch __P((void));
#ifdef SPL_INLINE
static inline int splraise __P((int));
static inline int spllower __P((int));
static inline void splx __P((int));
#else
extern int splraise __P((int));
extern int spllower __P((int));
extern void splx __P((int));
#endif

extern volatile int tickspending;

extern volatile imask_t ipending;
extern imask_t imask[];

/*
 * inlines for manipulating PSL_EE
 */
static inline void
extintr_restore(register_t omsr)
{
	__asm volatile ("sync; mtmsr %0;" :: "r"(omsr));
}

static inline register_t
extintr_enable(void)
{
	register_t omsr;

	__asm volatile("sync;");
	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr | PSL_EE));

	return omsr;
}

static inline register_t
extintr_disable(void)
{
	register_t omsr;

	__asm volatile("mfmsr %0;" : "=r"(omsr));
	__asm volatile("mtmsr %0;" :: "r"(omsr & ~PSL_EE));
	__asm volatile("isync;");

	return omsr;
}

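/*
 * Illustrative sketch: the usual pattern for a short critical section
 * that must not be preempted by an external interrupt.  The variable
 * name is hypothetical; splraise() and splx() below use this same
 * disable/restore bracket around their manipulation of cpl.
 */
#if 0	/* example only */
	{
		register_t omsr;

		omsr = extintr_disable();	/* clear PSL_EE, remember old MSR */
		/* ... touch state shared with interrupt handlers ... */
		extintr_restore(omsr);		/* put the MSR (and PSL_EE) back */
	}
#endif	/* example only */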
#ifdef SPL_INLINE
static inline int
splraise(int ncpl)
{
	int ocpl;
	register_t omsr;

	omsr = extintr_disable();
	ocpl = cpl;
	if (ncpl > cpl) {
		SPL_STATS_LOG(ncpl, 0);
		cpl = ncpl;
		if ((ncpl == IPL_HIGH) && ((omsr & PSL_EE) != 0)) {
			/* leave external interrupts disabled */
			return (ocpl | IPL_EE);
		}
	}
	extintr_restore(omsr);
	return (ocpl);
}

static inline void
splx(int xcpl)
{
	imask_t *ncplp;
	register_t omsr;
	int ncpl = xcpl & IPL_PRIMASK;

	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	if (ncpl < cpl) {
		cpl = ncpl;
		SPL_STATS_LOG(ncpl, 0);
		if (imask_test_v(&ipending, ncplp))
			intr_dispatch();
	}
	if (xcpl & IPL_EE)
		omsr |= PSL_EE;
	extintr_restore(omsr);
}

static inline int
spllower(int ncpl)
{
	int ocpl;
	imask_t *ncplp;
	register_t omsr;

	ncpl &= IPL_PRIMASK;
	ncplp = &imask[ncpl];

	omsr = extintr_disable();
	ocpl = cpl;
	cpl = ncpl;
	SPL_STATS_LOG(ncpl, 0);
#ifdef EXT_INTR_STATS
	ext_intr_statp = 0;
#endif
	if (imask_test_v(&ipending, ncplp))
		intr_dispatch();

	if (ncpl < IPL_HIGH)
		omsr |= PSL_EE;
	extintr_restore(omsr);

	return (ocpl);
}
#endif	/* SPL_INLINE */

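/*
 * Illustrative sketch of the conventional spl bracket built from the
 * primitives above: raise to the required IPL, do the protected work,
 * then restore the previous level (which may carry the IPL_EE flag).
 * The IPL chosen here is just an example.
 */
#if 0	/* example only */
	{
		int s;

		s = splraise(IPL_TTY);	/* block tty-level interrupts */
		/* ... touch data shared with the tty interrupt handler ... */
		splx(s);		/* drop back, dispatch pending ints */
	}
#endif	/* example only */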

/*
 * Soft interrupt IRQs
 * see also intrnames[] in locore.S
 */
#define SIR_BASE	(NIRQ-32)
#define SIXBIT(ipl)	((ipl) - SIR_BASE) /* XXX renovate later */
#define SIR_SOFTCLOCK	(NIRQ-5)
#define SIR_CLOCK	SIXBIT(SIR_SOFTCLOCK) /* XXX renovate later */
#define SIR_SOFTNET	(NIRQ-4)
#define SIR_SOFTI2C	(NIRQ-3)
#define SIR_SOFTSERIAL	(NIRQ-2)
#define SIR_HWCLOCK	(NIRQ-1)
#define SPL_CLOCK	SIXBIT(SIR_HWCLOCK) /* XXX renovate later */
#define SIR_RES		~(SIBIT(SIR_SOFTCLOCK)|\
			  SIBIT(SIR_SOFTNET)|\
			  SIBIT(SIR_SOFTI2C)|\
			  SIBIT(SIR_SOFTSERIAL)|\
			  SIBIT(SIR_HWCLOCK))

/*
 * Software interrupt spl's
 *
 * NOTE: splsoftclock() is used by hardclock() to lower the priority from
 * clock to softclock before it calls softclock().
 */
#define	spllowersoftclock()	spllower(IPL_SOFTCLOCK)

struct intrhand;
extern struct intrhand *softnet_handlers[];
#define	schednetisr(an_isr)	softintr_schedule(softnet_handlers[(an_isr)])

void *softintr_establish(int level, void (*fun)(void *), void *arg);
void softintr_disestablish(void *cookie);
void softintr_schedule(void *cookie);

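/*
 * Illustrative sketch of the softintr(9)-style interface declared above:
 * establish a handler at an IPL_SOFT* level, schedule it from hard
 * interrupt context, and tear it down with softintr_disestablish() when
 * no longer needed.  The handler, cookie and softc names are hypothetical.
 */
#if 0	/* example only */
static void example_softintr(void *);		/* hypothetical handler */
static void *example_si;			/* cookie from establish */

static void
example_attach(void *sc)
{
	example_si = softintr_establish(IPL_SOFTNET, example_softintr, sc);
	if (example_si == NULL)
		panic("example: softintr_establish failed");
}

/* from the hardware ISR: defer the bulk of the work to soft context */
static void
example_intr_defer(void)
{
	softintr_schedule(example_si);
}
#endif	/* example only */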

/*
 * Miscellaneous
 */
#define	spl0()		spllower(IPL_NONE)

typedef int ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

static inline int
splraiseipl(ipl_cookie_t icookie)
{

	return splraise(icookie._ipl);
}
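/*
 * Illustrative sketch: callers are expected to precompute an ipl_cookie_t
 * once (e.g. at attach time) and later wrap the critical section with
 * splraiseipl()/splx().  The softc member name "sc_iplcookie" is
 * hypothetical.
 */
#if 0	/* example only */
	/* at attach time */
	sc->sc_iplcookie = makeiplcookie(IPL_BIO);

	/* later, around the critical section */
	s = splraiseipl(sc->sc_iplcookie);
	/* ... protected against block I/O interrupts ... */
	splx(s);
#endif	/* example only */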

#include <sys/spl.h>

#define SIBIT(ipl)	(1 << ((ipl) - SIR_BASE))
#if 0
#define	setsoftclock()	softintr(SIBIT(SIR_SOFTCLOCK))
#define	setsoftnet()	softintr(SIBIT(SIR_SOFTNET))
#define	setsoftserial()	softintr(SIBIT(SIR_SOFTSERIAL))
#define	setsofti2c()	softintr(SIBIT(SIR_SOFTI2C))
#endif

extern void *softnet_si;
void	*intr_establish(int, int, int, int (*)(void *), void *);
void	intr_disestablish(void *);
void	init_interrupt(void);
const char * intr_typename(int);
const char * intr_string(int);
const struct evcnt * intr_evcnt(int);
void	ext_intr(struct intrframe *);

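/*
 * Illustrative sketch of hooking up a hardware interrupt with the
 * declarations above.  The argument order (irq, IST_* type, IPL_* level,
 * handler, arg) is assumed from similar NetBSD ports, and the handler,
 * cookie and IRQ choice are hypothetical.
 */
#if 0	/* example only */
static int example_hwintr(void *);		/* hypothetical handler */
static void *example_ih;			/* cookie from establish */

static void
example_hw_attach(void *sc)
{
	/* assumed argument order: irq, IST_* type, IPL_* level, fn, arg */
	example_ih = intr_establish(IRQ_GPP_BASE + 4, IST_LEVEL, IPL_NET,
	    example_hwintr, sc);
	if (example_ih == NULL)
		panic("example: intr_establish failed");
}
#endif	/* example only */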
/* the following are needed to compile until this port is properly
 * converted to ppcoea-renovation.
 */
void genppc_cpu_configure(void);

#if 0
void	softserial(void);
#endif
void	strayintr(int);

/*
 * defines for indexing intrcnt
 */
#define CNT_IRQ0	0
#define CNT_CLOCK	SIR_HWCLOCK
#define CNT_SOFTCLOCK	SIR_SOFTCLOCK
#define CNT_SOFTNET	SIR_SOFTNET
#define CNT_SOFTSERIAL	SIR_SOFTSERIAL
#define CNT_SOFTI2C	SIR_SOFTI2C

#endif /* !_LOCORE && _KERNEL */

#endif /* _MVPPPC_INTR_H_ */