/* cpufunc.h, revision 1.29 (CVS annotate viewer navigation header removed) */
      1  1.29  rearnsha /*	$NetBSD: cpufunc.h,v 1.29 2003/09/06 09:08:35 rearnsha Exp $	*/
      2   1.1   reinoud 
      3   1.1   reinoud /*
      4   1.1   reinoud  * Copyright (c) 1997 Mark Brinicombe.
      5   1.1   reinoud  * Copyright (c) 1997 Causality Limited
      6   1.1   reinoud  * All rights reserved.
      7   1.1   reinoud  *
      8   1.1   reinoud  * Redistribution and use in source and binary forms, with or without
      9   1.1   reinoud  * modification, are permitted provided that the following conditions
     10   1.1   reinoud  * are met:
     11   1.1   reinoud  * 1. Redistributions of source code must retain the above copyright
     12   1.1   reinoud  *    notice, this list of conditions and the following disclaimer.
     13   1.1   reinoud  * 2. Redistributions in binary form must reproduce the above copyright
     14   1.1   reinoud  *    notice, this list of conditions and the following disclaimer in the
     15   1.1   reinoud  *    documentation and/or other materials provided with the distribution.
     16   1.1   reinoud  * 3. All advertising materials mentioning features or use of this software
     17   1.1   reinoud  *    must display the following acknowledgement:
     18   1.1   reinoud  *	This product includes software developed by Causality Limited.
     19   1.1   reinoud  * 4. The name of Causality Limited may not be used to endorse or promote
     20   1.1   reinoud  *    products derived from this software without specific prior written
     21   1.1   reinoud  *    permission.
     22   1.1   reinoud  *
     23   1.1   reinoud  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
     24   1.1   reinoud  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     25   1.1   reinoud  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     26   1.1   reinoud  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
     27   1.1   reinoud  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     28   1.1   reinoud  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     29   1.1   reinoud  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30   1.1   reinoud  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31   1.1   reinoud  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32   1.1   reinoud  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33   1.1   reinoud  * SUCH DAMAGE.
     34   1.1   reinoud  *
     35   1.1   reinoud  * RiscBSD kernel project
     36   1.1   reinoud  *
     37   1.1   reinoud  * cpufunc.h
     38   1.1   reinoud  *
     39   1.1   reinoud  * Prototypes for cpu, mmu and tlb related functions.
     40   1.1   reinoud  */
     41   1.1   reinoud 
     42   1.1   reinoud #ifndef _ARM32_CPUFUNC_H_
     43   1.1   reinoud #define _ARM32_CPUFUNC_H_
     44   1.1   reinoud 
     45  1.21   thorpej #ifdef _KERNEL
     46  1.21   thorpej 
     47   1.1   reinoud #include <sys/types.h>
     48  1.21   thorpej #include <arm/cpuconf.h>
     49   1.1   reinoud 
/*
 * Per-CPU-model operation vector: function pointers for the CPU, MMU,
 * TLB and cache primitives.  set_cpufuncs() (declared below) installs
 * the table for the processor the kernel is running on, and callers
 * invoke the members through the cpu_*() wrapper macros later in this
 * file.
 */
struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		__P((void));
	void	(*cf_cpwait)		__P((void));

	/* MMU functions */

	u_int	(*cf_control)		__P((u_int bic, u_int eor));
	void	(*cf_domains)		__P((u_int domains));
	void	(*cf_setttb)		__P((u_int ttb));
	u_int	(*cf_faultstatus)	__P((void));
	u_int	(*cf_faultaddress)	__P((void));

	/* TLB functions */

	void	(*cf_tlb_flushID)	__P((void));
	void	(*cf_tlb_flushID_SE)	__P((u_int va));
	void	(*cf_tlb_flushI)	__P((void));
	void	(*cf_tlb_flushI_SE)	__P((u_int va));
	void	(*cf_tlb_flushD)	__P((void));
	void	(*cf_tlb_flushD_SE)	__P((u_int va));

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	__P((void));
	void	(*cf_icache_sync_range)	__P((vaddr_t, vsize_t));

	void	(*cf_dcache_wbinv_all)	__P((void));
	void	(*cf_dcache_wbinv_range) __P((vaddr_t, vsize_t));
	void	(*cf_dcache_inv_range)	__P((vaddr_t, vsize_t));
	void	(*cf_dcache_wb_range)	__P((vaddr_t, vsize_t));

	void	(*cf_idcache_wbinv_all)	__P((void));
	void	(*cf_idcache_wbinv_range) __P((vaddr_t, vsize_t));

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	__P((void));
	void	(*cf_drain_writebuf)	__P((void));
	void	(*cf_flush_brnchtgt_C)	__P((void));
	void	(*cf_flush_brnchtgt_E)	__P((u_int va));

	void	(*cf_sleep)		__P((int mode));

	/* Soft functions */

	int	(*cf_dataabt_fixup)	__P((void *arg));
	int	(*cf_prefetchabt_fixup)	__P((void *arg));

	void	(*cf_context_switch)	__P((void));

	void	(*cf_setup)		__P((char *string));
};
    153   1.1   reinoud 
/* The operation vector for the CPU we are running on, and its CPU id. */
extern struct cpu_functions cpufuncs;
extern u_int cputype;

/* Convenience wrappers: dispatch through the installed cpufuncs vector. */
#define cpu_id()		cpufuncs.cf_id()
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
/* Return codes for the abort fixup handlers above. */
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define cpu_setup(a)			cpufuncs.cf_setup(a)

/*
 * set_cpufuncs() installs the operation vector matching the running
 * CPU into `cpufuncs'; a nonzero return is one of the error codes
 * below (0 presumably indicates success -- confirm in cpufunc.c).
 */
int	set_cpufuncs		__P((void));
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
    202   1.1   reinoud 
/* Generic helpers: no-op stub, null abort fixup, raw CP15 accessors. */
void	cpufunc_nullop		__P((void));
int	cpufunc_null_fixup	__P((void *));
int	early_abort_fixup	__P((void *));
int	late_abort_fixup	__P((void *));
u_int	cpufunc_id		__P((void));
u_int	cpufunc_control		__P((u_int clear, u_int bic));
void	cpufunc_domains		__P((u_int domains));
u_int	cpufunc_faultstatus	__P((void));
u_int	cpufunc_faultaddress	__P((void));

#ifdef CPU_ARM3
u_int	arm3_control		__P((u_int clear, u_int bic));
void	arm3_cache_flush	__P((void));
#endif	/* CPU_ARM3 */

/* Routines shared by ARM6 and ARM7. */
#if defined(CPU_ARM6) || defined(CPU_ARM7)
void	arm67_setttb		__P((u_int ttb));
void	arm67_tlb_flush		__P((void));
void	arm67_tlb_purge		__P((u_int va));
void	arm67_cache_flush	__P((void));
void	arm67_context_switch	__P((void));
#endif	/* CPU_ARM6 || CPU_ARM7 */

#ifdef CPU_ARM6
void	arm6_setup		__P((char *string));
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
void	arm7_setup		__P((char *string));
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
int	arm7_dataabt_fixup	__P((void *arg));
void	arm7tdmi_setup		__P((char *string));
void	arm7tdmi_setttb		__P((u_int ttb));
void	arm7tdmi_tlb_flushID	__P((void));
void	arm7tdmi_tlb_flushID_SE	__P((u_int va));
void	arm7tdmi_cache_flushID	__P((void));
void	arm7tdmi_context_switch	__P((void));
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
void	arm8_setttb		__P((u_int ttb));
void	arm8_tlb_flushID	__P((void));
void	arm8_tlb_flushID_SE	__P((u_int va));
void	arm8_cache_flushID	__P((void));
void	arm8_cache_flushID_E	__P((u_int entry));
void	arm8_cache_cleanID	__P((void));
void	arm8_cache_cleanID_E	__P((u_int entry));
void	arm8_cache_purgeID	__P((void));
void	arm8_cache_purgeID_E	__P((u_int entry));

void	arm8_cache_syncI	__P((void));
/*
 * NOTE(review): the second argument of the *_rng variants is declared
 * vsize_t but named "end"; presumably it is a length in bytes --
 * verify against the assembler implementations.
 */
void	arm8_cache_cleanID_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_cleanD_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_purgeID_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_purgeD_rng	__P((vaddr_t start, vsize_t end));
void	arm8_cache_syncI_rng	__P((vaddr_t start, vsize_t end));

void	arm8_context_switch	__P((void));

void	arm8_setup		__P((char *string));

u_int	arm8_clock_config	__P((u_int, u_int));
#endif

#ifdef CPU_SA110
void	sa110_setup		__P((char *string));
void	sa110_context_switch	__P((void));
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa11x0_drain_readbuf	__P((void));

void	sa11x0_context_switch	__P((void));
void	sa11x0_cpu_sleep	__P((int mode));

void	sa11x0_setup		__P((char *string));
#endif

/* Routines shared by all the StrongARM variants above. */
#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa1_setttb		__P((u_int ttb));

void	sa1_tlb_flushID_SE	__P((u_int va));

void	sa1_cache_flushID	__P((void));
void	sa1_cache_flushI	__P((void));
void	sa1_cache_flushD	__P((void));
void	sa1_cache_flushD_SE	__P((u_int entry));

void	sa1_cache_cleanID	__P((void));
void	sa1_cache_cleanD	__P((void));
void	sa1_cache_cleanD_E	__P((u_int entry));

void	sa1_cache_purgeID	__P((void));
void	sa1_cache_purgeID_E	__P((u_int entry));
void	sa1_cache_purgeD	__P((void));
void	sa1_cache_purgeD_E	__P((u_int entry));

void	sa1_cache_syncI		__P((void));
void	sa1_cache_cleanID_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_cleanD_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_purgeID_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_purgeD_rng	__P((vaddr_t start, vsize_t end));
void	sa1_cache_syncI_rng	__P((vaddr_t start, vsize_t end));

#endif
    310  1.23       rjs 
#ifdef CPU_ARM9
void	arm9_setttb		__P((u_int));

void	arm9_tlb_flushID_SE	__P((u_int va));

void	arm9_cache_flushID	__P((void));
void	arm9_cache_flushID_SE	__P((u_int));
void	arm9_cache_flushI	__P((void));
void	arm9_cache_flushI_SE	__P((u_int));
void	arm9_cache_flushD	__P((void));
void	arm9_cache_flushD_SE	__P((u_int));

void	arm9_cache_cleanID	__P((void));

void	arm9_cache_syncI	__P((void));
void	arm9_cache_flushID_rng	__P((vaddr_t, vsize_t));
void	arm9_cache_flushD_rng	__P((vaddr_t, vsize_t));
void	arm9_cache_syncI_rng	__P((vaddr_t, vsize_t));

void	arm9_context_switch	__P((void));

void	arm9_setup		__P((char *string));
#endif

#ifdef CPU_ARM10
void	arm10_setttb		__P((u_int));

void	arm10_tlb_flushID_SE	__P((u_int));
void	arm10_tlb_flushI_SE	__P((u_int));

void	arm10_icache_sync_all	__P((void));
void	arm10_icache_sync_range	__P((vaddr_t, vsize_t));

void	arm10_dcache_wbinv_all	__P((void));
void	arm10_dcache_wbinv_range __P((vaddr_t, vsize_t));
void	arm10_dcache_inv_range	__P((vaddr_t, vsize_t));
void	arm10_dcache_wb_range	__P((vaddr_t, vsize_t));

void	arm10_idcache_wbinv_all	__P((void));
void	arm10_idcache_wbinv_range __P((vaddr_t, vsize_t));

void	arm10_context_switch	__P((void));

void	arm10_setup		__P((char *string));

/*
 * ARM10 D-cache set/index geometry; presumably consumed by the cache
 * routines above -- confirm in the assembler implementation.
 */
extern unsigned arm10_dcache_sets_max;
extern unsigned arm10_dcache_sets_inc;
extern unsigned arm10_dcache_index_max;
extern unsigned arm10_dcache_index_inc;
#endif

/* TLB and write-buffer primitives shared by the ARMv4-class cores. */
#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

void	armv4_tlb_flushID	__P((void));
void	armv4_tlb_flushI	__P((void));
void	armv4_tlb_flushD	__P((void));
void	armv4_tlb_flushD_SE	__P((u_int va));

void	armv4_drain_writebuf	__P((void));
#endif

#if defined(CPU_IXP12X0)
void	ixp12x0_drain_readbuf	__P((void));
void	ixp12x0_context_switch	__P((void));
void	ixp12x0_setup		__P((char *string));
#endif
    380   1.1   reinoud 
/* Routines shared by all the XScale variants. */
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)
void	xscale_cpwait		__P((void));

void	xscale_cpu_sleep	__P((int mode));

u_int	xscale_control		__P((u_int clear, u_int bic));

void	xscale_setttb		__P((u_int ttb));

void	xscale_tlb_flushID_SE	__P((u_int va));

void	xscale_cache_flushID	__P((void));
void	xscale_cache_flushI	__P((void));
void	xscale_cache_flushD	__P((void));
void	xscale_cache_flushD_SE	__P((u_int entry));

void	xscale_cache_cleanID	__P((void));
void	xscale_cache_cleanD	__P((void));
void	xscale_cache_cleanD_E	__P((u_int entry));

void	xscale_cache_clean_minidata __P((void));

void	xscale_cache_purgeID	__P((void));
void	xscale_cache_purgeID_E	__P((u_int entry));
void	xscale_cache_purgeD	__P((void));
void	xscale_cache_purgeD_E	__P((u_int entry));

void	xscale_cache_syncI	__P((void));
void	xscale_cache_cleanID_rng __P((vaddr_t start, vsize_t end));
void	xscale_cache_cleanD_rng	__P((vaddr_t start, vsize_t end));
void	xscale_cache_purgeID_rng __P((vaddr_t start, vsize_t end));
void	xscale_cache_purgeD_rng	__P((vaddr_t start, vsize_t end));
void	xscale_cache_syncI_rng	__P((vaddr_t start, vsize_t end));
void	xscale_cache_flushD_rng	__P((vaddr_t start, vsize_t end));

void	xscale_context_switch	__P((void));

void	xscale_setup		__P((char *string));
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */

/* Short-hand aliases for the dispatch macros defined above. */
#define tlb_flush	cpu_tlb_flushID
#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf
    425   1.1   reinoud 
/*
 * Macros for manipulating CPU interrupts
 */
#ifdef __PROG32
/*
 * __set_cpsr_c(bic, eor):
 *
 * Read the current CPSR, clear the bits given in `bic', toggle the
 * bits given in `eor', and write the result back to the CPSR control
 * field only ("msr cpsr_c"; the flag fields are left untouched).
 * Returns the CPSR value as it was *before* the modification, so the
 * caller can later hand it to restore_interrupts().
 */
static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));

static __inline u_int32_t
__set_cpsr_c(u_int bic, u_int eor)
{
	u_int32_t	tmp, ret;

	__asm __volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor));

	return ret;
}

/*
 * disable_interrupts(mask): force the I32/F32 bits selected by `mask'
 * to 1 (clearing then XORing the same bits sets them).  Returns the
 * previous CPSR for use with restore_interrupts().
 */
#define disable_interrupts(mask)					\
	(__set_cpsr_c((mask) & (I32_bit | F32_bit), \
		      (mask) & (I32_bit | F32_bit)))

/* enable_interrupts(mask): clear the I32/F32 bits selected by `mask'. */
#define enable_interrupts(mask)						\
	(__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))

/*
 * restore_interrupts(old_cpsr): restore the I32/F32 bits to the state
 * captured in `old_cpsr' (a value previously returned by the macros
 * above).
 */
#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
#else /* ! __PROG32 */
/* Non-__PROG32 builds: the IRQ/FIQ disable bits live in r15 instead
 * (see set_r15() below). */
#define	disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define	enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define	restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */
    469  1.15   thorpej 
#ifdef __PROG32
/* Functions to manipulate the CPSR (bic = bits to clear, eor = bits
 * to toggle; cf. __set_cpsr_c() above). */
u_int	SetCPSR(u_int bic, u_int eor);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int bic, u_int eor);
u_int	get_r15(void);
#endif /* __PROG32 */

/*
 * Functions to manipulate cpu r13 (the per-mode stack pointer)
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	__P((u_int mode, u_int address));
u_int get_stackptr	__P((u_int mode));

/*
 * Miscellany
 */

int get_pc_str_offset	__P((void));

/*
 * CPU functions from locore.S
 */

void cpu_reset		__P((void)) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
/* arm_pi* = primary I-cache, arm_pd* = primary D-cache (presumably;
 * confirm against the code that initializes them). */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;
    518   1.1   reinoud 
    519   1.1   reinoud #endif	/* _KERNEL */
    520   1.1   reinoud #endif	/* _ARM32_CPUFUNC_H_ */
    521   1.1   reinoud 
    522   1.1   reinoud /* End of cpufunc.h */
    523