/* cpufunc.h, revision 1.77 — NetBSD sys/arch/arm/include (source-browser navigation header removed) */
      1 /*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/
      2 
      3 /*
      4  * Copyright (c) 1997 Mark Brinicombe.
      5  * Copyright (c) 1997 Causality Limited
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. All advertising materials mentioning features or use of this software
     17  *    must display the following acknowledgement:
     18  *	This product includes software developed by Causality Limited.
     19  * 4. The name of Causality Limited may not be used to endorse or promote
     20  *    products derived from this software without specific prior written
     21  *    permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
     24  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
     25  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
     26  * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
     27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  *
     35  * RiscBSD kernel project
     36  *
     37  * cpufunc.h
     38  *
     39  * Prototypes for cpu, mmu and tlb related functions.
     40  */
     41 
     42 #ifndef _ARM_CPUFUNC_H_
     43 #define _ARM_CPUFUNC_H_
     44 
     45 #ifdef _KERNEL
     46 
     47 #include <sys/types.h>
     48 #include <arm/armreg.h>
     49 #include <arm/cpuconf.h>
     50 #include <arm/armreg.h>
     51 #include <arm/cpufunc_proto.h>
     52 
/*
 * Per-CPU-model operations vector.  set_cpufuncs() fills in the global
 * "cpufuncs" instance with the routines matching the CPU detected at
 * boot; the cpu_*() macros below dispatch through it.
 */
struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);		/* return the CPU id */
	void	(*cf_cpwait)		(void);		/* wait for coprocessor ops to complete */

	/* MMU functions */

	/* NOTE(review): the two u_int args appear to be (clear, set) bit
	 * masks for the control register, returning its prior value —
	 * confirm against the cpufunc_control() implementation. */
	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);	/* set domain access control */
#if defined(ARM_MMU_EXTENDED)
	void	(*cf_setttb)		(u_int, tlb_asid_t);	/* set TTB (+ ASID) */
#else
	void	(*cf_setttb)		(u_int, bool);		/* set translation table base */
#endif
	u_int	(*cf_faultstatus)	(void);		/* read fault status register */
	u_int	(*cf_faultaddress)	(void);		/* read fault address register */

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);		/* flush unified TLB */
	void	(*cf_tlb_flushID_SE)	(vaddr_t);	/* flush one unified TLB entry */
	void	(*cf_tlb_flushI)	(void);		/* flush instruction TLB */
	void	(*cf_tlb_flushI_SE)	(vaddr_t);	/* flush one I-TLB entry */
	void	(*cf_tlb_flushD)	(void);		/* flush data TLB */
	void	(*cf_tlb_flushD_SE)	(vaddr_t);	/* flush one D-TLB entry */

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	/* Secondary (outer) cache ops take (virtual, physical, size). */
	void	(*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, psize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);		/* flush prefetch buffer */
	void	(*cf_drain_writebuf)	(void);		/* drain the write buffer */
	void	(*cf_flush_brnchtgt_C)	(void);		/* flush whole branch target cache */
	void	(*cf_flush_brnchtgt_E)	(u_int);	/* flush one branch target entry */

	void	(*cf_sleep)		(int mode);	/* enter a CPU sleep/idle mode */

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);	/* data abort fixup */
	int	(*cf_prefetchabt_fixup)	(void *);	/* prefetch abort fixup */

#if defined(ARM_MMU_EXTENDED)
	void	(*cf_context_switch)	(u_int, tlb_asid_t);
#else
	void	(*cf_context_switch)	(u_int);
#endif

	void	(*cf_setup)		(char *);	/* CPU-specific setup at boot */
};
    168 
/* The global ops vector and the CPU type id detected at boot. */
extern struct cpu_functions cpufuncs;
extern u_int cputype;

/*
 * Convenience wrappers: every cpu_*() call below dispatches through the
 * "cpufuncs" operations vector (see struct cpu_functions above).
 */
#define cpu_idnum()		cpufuncs.cf_id()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t, f)	cpufuncs.cf_setttb(t, f)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
#define	cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
#define	cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

/* Abort fixup hooks; the fixup routines return one of the codes below. */
#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define cpu_context_switch(a)		cpufuncs.cf_context_switch(a)
#define cpu_setup(a)			cpufuncs.cf_setup(a)

/* Install the cpufuncs vector for the running (or given) CPU id. */
int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

/* Generic helpers usable as cpufuncs members (see cpufunc.c / asm). */
void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

/* Historical aliases. */
#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf


/* cpwait is only meaningful on XScale; elsewhere it is a no-op. */
#if defined(CPU_XSCALE)
#define	cpu_cpwait()		cpufuncs.cf_cpwait()
#endif

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif
    244 
    245 /*
    246  * Macros for manipulating CPU interrupts
    247  */
    248 #ifdef __PROG32
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

/*
 * Read-modify-write the control field of the CPSR: clear the bits in
 * "bic", then toggle the bits in "eor".  Returns the previous CPSR
 * value so the caller can restore it later (see restore_interrupts()).
 */
static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}
    268 
/*
 * Set the CPSR interrupt-disable bits named in "mask" (only I32_bit
 * and F32_bit are honoured), masking the corresponding interrupts.
 * Returns the previous CPSR value for a later restore_interrupts().
 */
static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"orr	 %1, %0, %2\n"	/* set bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}
    285 
/*
 * Clear the CPSR interrupt-disable bits named in "mask" (I32_bit
 * and/or F32_bit), unmasking the corresponding interrupts.  Returns
 * the previous CPSR value.  On ARMv6+, a compile-time-constant mask
 * is handled with a single "cpsie" instruction, avoiding the msr
 * read-modify-write.
 */
static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret;
	mask &= (I32_bit | F32_bit);

	/* Get the CPSR */
	__asm __volatile("mrs\t%0, cpsr\n" : "=r"(ret));
#ifdef _ARM_ARCH_6
	if (__builtin_constant_p(mask)) {
		switch (mask) {
		case I32_bit | F32_bit:
			__asm __volatile("cpsie\tif");
			break;
		case I32_bit:
			__asm __volatile("cpsie\ti");
			break;
		case F32_bit:
			__asm __volatile("cpsie\tf");
			break;
		default:
			/* mask == 0: nothing to enable */
			break;
		}
		return ret;
	}
#endif /* _ARM_ARCH_6 */

	/* Set the control field of CPSR */
	__asm volatile("msr\tcpsr_c, %0" :: "r"(ret & ~mask));

	return ret;
}
    318 
/*
 * Restore the I/F interrupt-disable bits to the state saved in
 * "old_cpsr" (as returned by disable_interrupts()/cpsid()), without
 * touching any other CPSR bits.
 */
#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))

static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));
    324 
/*
 * Enable (unmask) the interrupt classes named by "psw".  On ARMv6+
 * a compile-time-constant argument compiles to one "cpsie"
 * instruction; otherwise this falls back to enable_interrupts().
 */
static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}
    342 
/*
 * Disable (mask) the interrupt classes named by "psw" and return the
 * previous CPSR for a later restore_interrupts().  On ARMv6+ a
 * compile-time-constant argument uses a single "cpsid" instruction;
 * otherwise this falls back to disable_interrupts().
 */
static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}
    362 
#else /* ! __PROG32 */
/*
 * 26-bit mode: the PSR bits live in r15, so interrupt masking is done
 * with set_r15() instead of CPSR accesses.  Same contracts as the
 * __PROG32 versions above.
 */
#define	disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define	enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define	restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */
    375 
#ifdef __PROG32
/* Functions to manipulate the CPSR (implemented in assembly). */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int, u_int);
u_int	get_r15(void);
#endif /* __PROG32 */
    385 
    386 
/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __dead;	/* reset the CPU; does not return */

/*
 * Cache info variables.
 *
 * Cache type encoding (matches the values below): bit 0 set means
 * physically tagged (xxPT), bit 1 set means physically indexed (PIxx);
 * hence VIVT=0, VIPT=1, PIPT=3.
 */
#define	CACHE_TYPE_VIVT		0
#define	CACHE_TYPE_xxPT		1
#define	CACHE_TYPE_VIPT		1
#define	CACHE_TYPE_PIxx		2
#define	CACHE_TYPE_PIPT		3
    402 /* PRIMARY CACHE VARIABLES */
/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	/* I-cache geometry (sizes presumably in bytes — confirm in cpufunc.c). */
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	/* D-cache geometry. */
	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;	/* CACHE_TYPE_* of the cache as a whole */
	bool cache_unified;	/* true if I- and D-cache are unified */
	uint8_t icache_type;	/* CACHE_TYPE_* of the I-cache */
	uint8_t dcache_type;	/* CACHE_TYPE_* of the D-cache */
};
    421 
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
/* NOTE(review): presumably the VIPT page-colouring mask — confirm in pmap. */
extern u_int arm_cache_prefer_mask;
#endif
extern u_int arm_dcache_align;		/* D-cache line size */
extern u_int arm_dcache_align_mask;	/* presumably arm_dcache_align - 1 */

extern struct arm_cache_info arm_pcache;	/* primary cache info */
extern struct arm_cache_info arm_scache;	/* secondary cache info */
    430 #endif	/* _KERNEL */
    431 
#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Miscellany
 */

int get_pc_str_offset	(void);

/*
 * Functions to manipulate cpu r13 (the banked stack pointer)
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	(u_int, u_int);	/* (processor mode, new sp) */
u_int get_stackptr	(u_int);	/* (processor mode) */

#endif /* _KERNEL || _KMEMUSER */
    448 
    449 #endif	/* _ARM_CPUFUNC_H_ */
    450 
    451 /* End of cpufunc.h */
    452