/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM_CPUFUNC_H_
#define _ARM_CPUFUNC_H_

#ifdef __arm__

#ifdef _KERNEL

#if !defined(_RUMPKERNEL)

#include <sys/types.h>

#include <arm/armreg.h>
#include <arm/cpuconf.h>
#include <arm/cpufunc_proto.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);
#if defined(ARM_MMU_EXTENDED)
	void	(*cf_setttb)		(u_int, tlb_asid_t);
#else
	void	(*cf_setttb)		(u_int, bool);
#endif
	u_int	(*cf_faultstatus)	(void);
	u_int	(*cf_faultaddress)	(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(vaddr_t);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(vaddr_t);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(vaddr_t);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Sync (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
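	/*
	 * For example (an illustrative sketch, not a primitive defined
	 * here): after storing newly generated instructions through a
	 * data mapping, the usual sequence is to write-back the D-cache
	 * range and then synchronize the I-cache over the same range,
	 * via the cpu_*() wrapper macros defined below:
	 *
	 *	cpu_dcache_wb_range(va, size);
	 *	cpu_icache_sync_range(va, size);
	 *
	 * (On implementations whose icache_sync_range already performs
	 * the write-back, the first call is redundant but harmless.)
	 */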
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, psize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);
	void	(*cf_flush_brnchtgt_C)	(void);
	void	(*cf_flush_brnchtgt_E)	(u_int);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);
	int	(*cf_prefetchabt_fixup)	(void *);

#if defined(ARM_MMU_EXTENDED)
	void	(*cf_context_switch)	(u_int, tlb_asid_t);
#else
	void	(*cf_context_switch)	(u_int);
#endif

	void	(*cf_setup)		(char *);
};
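/*
 * Each supported CPU class is expected to supply an instance of this
 * structure from its cpufunc implementation; set_cpufuncs() below
 * typically selects one based on the CPU ID and installs it as the
 * global "cpufuncs" that the cpu_*() macros dispatch through.
 */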

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define cpu_idnum()		cpufuncs.cf_id()

#define cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define cpu_domains(d)		cpufuncs.cf_domains(d)
#define cpu_setttb(t, f)	cpufuncs.cf_setttb(t, f)
#define cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
#define	cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
#define	cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define ABORT_FIXUP_OK		0	/* fixup succeeded */
#define ABORT_FIXUP_FAILED	1	/* fixup failed */
#define ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define cpu_context_switch(a)		cpufuncs.cf_context_switch(a)
#define cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define ARCHITECTURE_NOT_SUPPORTED	2	/* not known */
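/*
 * A minimal boot-time sketch (assuming the usual early-bootstrap flow):
 * the port must call set_cpufuncs() before any of the cpu_*() macros
 * above are used, and treat a non-zero return as fatal, e.g.
 *
 *	if (set_cpufuncs() != 0)
 *		panic("cpu not recognized");
 */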

void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

#define setttb		cpu_setttb
#define drain_writebuf	cpu_drain_writebuf


#if defined(CPU_XSCALE)
#define	cpu_cpwait()		cpufuncs.cf_cpwait()
#endif

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif
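/*
 * On XScale cores a CPWAIT sequence is required after certain CP15
 * writes before their effect is guaranteed visible, hence the
 * indirection above; on all other CPUs cpu_cpwait() expands to nothing.
 */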

/*
 * Macros for manipulating CPU interrupts
 */
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"orr	 %1, %0, %2\n"	/* Set bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret;
	mask &= (I32_bit | F32_bit);

	/* Get the CPSR */
	__asm __volatile("mrs\t%0, cpsr\n" : "=r"(ret));
#ifdef _ARM_ARCH_6
	if (__builtin_constant_p(mask)) {
		switch (mask) {
		case I32_bit | F32_bit:
			__asm __volatile("cpsie\tif");
			break;
		case I32_bit:
			__asm __volatile("cpsie\ti");
			break;
		case F32_bit:
			__asm __volatile("cpsie\tf");
			break;
		default:
			break;
		}
		return ret;
	}
#endif /* _ARM_ARCH_6 */

	/* Set the control field of CPSR */
	__asm volatile("msr\tcpsr_c, %0" :: "r"(ret & ~mask));

	return ret;
}

#define restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
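/*
 * A typical critical section (illustrative sketch): each function
 * returns the previous CPSR, so saving it and handing it back to
 * restore_interrupts() makes the pattern nest correctly:
 *
 *	uint32_t s = disable_interrupts(I32_bit);
 *	... code that must not be interrupted ...
 *	restore_interrupts(s);
 */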

static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));

static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}

static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}


/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);


/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __dead;

/*
 * Cache info variables.
 */
#define	CACHE_TYPE_VIVT		0
#define	CACHE_TYPE_xxPT		1
#define	CACHE_TYPE_VIPT		1
#define	CACHE_TYPE_PIxx		2
#define	CACHE_TYPE_PIPT		3
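/* Bit 0 of the type means "physically tagged", bit 1 "physically indexed". */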

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;
	bool cache_unified;
	uint8_t icache_type;
	uint8_t dcache_type;
};
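/*
 * The geometry fields are related by way_size = size / ways and
 * sets = way_size / line_size.  For example (hypothetical figures),
 * a 32KB 4-way D-cache with 32-byte lines has dcache_way_size =
 * 32768 / 4 = 8192 and dcache_sets = 8192 / 32 = 256; the real
 * values are probed from the hardware.
 */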

#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
extern u_int arm_cache_prefer_mask;
#endif
extern u_int arm_dcache_align;
extern u_int arm_dcache_align_mask;

extern struct arm_cache_info arm_pcache;
extern struct arm_cache_info arm_scache;

#endif	/* !_RUMPKERNEL */

#endif	/* _KERNEL */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Miscellany
 */

int get_pc_str_offset	(void);

bool cpu_gtmr_exists_p(void);		/* ARM generic timer present? */
u_int cpu_clusterid(void);		/* cluster id of the current CPU */
bool cpu_earlydevice_va_p(void);	/* early device mappings usable? */

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	(u_int, u_int);
u_int get_stackptr	(u_int);

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpufunc.h>

#endif /* __arm__/__aarch64__ */

#endif	/* _ARM_CPUFUNC_H_ */

/* End of cpufunc.h */