/*	cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef	_ARM_CPUFUNC_H_
#define	_ARM_CPUFUNC_H_

#ifdef _ARM_ARCH_7
/*
 * Options for DMB and DSB:
 *	oshld	Outer Shareable, load
 *	oshst	Outer Shareable, store
 *	osh	Outer Shareable, all
 *	nshld	Non-shareable, load
 *	nshst	Non-shareable, store
 *	nsh	Non-shareable, all
 *	ishld	Inner Shareable, load
 *	ishst	Inner Shareable, store
 *	ish	Inner Shareable, all
 *	ld	Full system, load
 *	st	Full system, store
 *	sy	Full system, all
 */
#define	dsb(opt)	__asm __volatile("dsb " __STRING(opt) : : : "memory")
#define	dmb(opt)	__asm __volatile("dmb " __STRING(opt) : : : "memory")
#define	isb()		__asm __volatile("isb" : : : "memory")
#define	sev()		__asm __volatile("sev" : : : "memory")

#else

#define	dsb(opt)	\
	__asm __volatile("mcr p15, 0, %0, c7, c10, 4" :: "r" (0) : "memory")
#define	dmb(opt)	\
	__asm __volatile("mcr p15, 0, %0, c7, c10, 5" :: "r" (0) : "memory")
#define	isb()		\
	__asm __volatile("mcr p15, 0, %0, c7, c5, 4" :: "r" (0) : "memory")
#define	sev()		__nothing

#endif
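
/*
 * Usage sketch (illustrative only; the pointer and field names below are
 * hypothetical and not part of this header).  A writer publishing data to
 * another observer orders the data store before the flag store with a
 * store barrier:
 *
 *	p->data = val;
 *	dmb(ishst);
 *	p->ready = 1;
 *
 * dsb() additionally waits for the accesses to complete, and isb() flushes
 * the pipeline so later instructions see earlier context-changing
 * operations.  On pre-v7 CPUs the option argument is ignored and the CP15
 * equivalents above are used instead.
 */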

#if defined(_ARM_ARCH_8)
#define	dma_r_r()	dsb(oshld)	// actually r_rw
#define	dma_w_w()	dsb(oshst)
#define	dma_rw_w()	dsb(osh)	// actually rw_rw
#elif defined(_ARM_ARCH_6)
#define	dma_r_r()	dsb(osh)	// actually rw_rw
#define	dma_w_w()	dsb(oshst)
#define	dma_rw_w()	dsb(osh)	// actually rw_rw
#else
#define	dma_r_r()	__nothing
#define	dma_w_w()	cpu_drain_writebuf()
#define	dma_rw_w()	cpu_drain_writebuf()
#endif
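
/*
 * Usage sketch (illustrative only; the descriptor layout and names are
 * hypothetical).  A driver handing a buffer to a device orders its
 * descriptor stores before the store that transfers ownership:
 *
 *	desc->paddr = buf_pa;
 *	desc->len = buf_len;
 *	dma_w_w();
 *	desc->flags = OWNED_BY_DEVICE;
 *
 * dma_r_r() is the read-side counterpart, and dma_rw_w() orders both prior
 * reads and writes before a subsequent write; on pre-v6 CPUs these fall
 * back to cpu_drain_writebuf() or a no-op as defined above.
 */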

#ifdef __arm__

#ifdef _KERNEL

#include <sys/types.h>

#include <arm/armreg.h>
#include <arm/cpuconf.h>
#include <arm/cpufunc_proto.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		(void);
	void	(*cf_cpwait)		(void);

	/* MMU functions */

	u_int	(*cf_control)		(u_int, u_int);
	void	(*cf_domains)		(u_int);
#if defined(ARM_MMU_EXTENDED)
	void	(*cf_setttb)		(u_int, tlb_asid_t);
#else
	void	(*cf_setttb)		(u_int, bool);
#endif
	u_int	(*cf_faultstatus)	(void);
	u_int	(*cf_faultaddress)	(void);

	/* TLB functions */

	void	(*cf_tlb_flushID)	(void);
	void	(*cf_tlb_flushID_SE)	(vaddr_t);
	void	(*cf_tlb_flushI)	(void);
	void	(*cf_tlb_flushI_SE)	(vaddr_t);
	void	(*cf_tlb_flushD)	(void);
	void	(*cf_tlb_flushD_SE)	(vaddr_t);

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 */
	void	(*cf_icache_sync_all)	(void);
	void	(*cf_icache_sync_range)	(vaddr_t, vsize_t);

	void	(*cf_dcache_wbinv_all)	(void);
	void	(*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*cf_dcache_inv_range)	(vaddr_t, vsize_t);
	void	(*cf_dcache_wb_range)	(vaddr_t, vsize_t);

	void	(*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_inv_range)	(vaddr_t, paddr_t, psize_t);
	void	(*cf_sdcache_wb_range)	(vaddr_t, paddr_t, psize_t);

	void	(*cf_idcache_wbinv_all)	(void);
	void	(*cf_idcache_wbinv_range)(vaddr_t, vsize_t);

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	(void);
	void	(*cf_drain_writebuf)	(void);
	void	(*cf_flush_brnchtgt_C)	(void);
	void	(*cf_flush_brnchtgt_E)	(u_int);

	void	(*cf_sleep)		(int mode);

	/* Soft functions */

	int	(*cf_dataabt_fixup)	(void *);
	int	(*cf_prefetchabt_fixup)	(void *);

#if defined(ARM_MMU_EXTENDED)
	void	(*cf_context_switch)	(u_int, tlb_asid_t);
#else
	void	(*cf_context_switch)	(u_int);
#endif

	void	(*cf_setup)		(char *);
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define	cpu_idnum()		cpufuncs.cf_id()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_domains(d)		cpufuncs.cf_domains(d)
#define	cpu_setttb(t, f)	cpufuncs.cf_setttb(t, f)
#define	cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define	cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
#define	cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
#define	cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
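
/*
 * Usage sketch (illustrative only; va and len are hypothetical names).
 * Code that stores instructions into memory, e.g. when installing a
 * trampoline or breakpoint, must make them visible to instruction fetch:
 *
 *	cpu_icache_sync_range(va, len);
 *
 * Per the rules above this may also write back the corresponding dirty
 * D-cache lines.  A buffer about to be filled by inbound DMA is instead
 * invalidated with cpu_dcache_inv_range(va, len); callers may not assume
 * that operation never writes dirty lines back first.
 */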

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define	cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define	ABORT_FIXUP_OK		0	/* fixup succeeded */
#define	ABORT_FIXUP_FAILED	1	/* fixup failed */
#define	ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define	cpu_context_switch(a)		cpufuncs.cf_context_switch(a)
#define	cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		(void);
int	set_cpufuncs_id		(u_int);
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		(void);
int	cpufunc_null_fixup	(void *);
int	early_abort_fixup	(void *);
int	late_abort_fixup	(void *);
u_int	cpufunc_id		(void);
u_int	cpufunc_control		(u_int, u_int);
void	cpufunc_domains		(u_int);
u_int	cpufunc_faultstatus	(void);
u_int	cpufunc_faultaddress	(void);

#if defined(CPU_XSCALE)
#define	cpu_cpwait()		cpufuncs.cf_cpwait()
#endif

#ifndef cpu_cpwait
#define	cpu_cpwait()
#endif

/*
 * Macros for manipulating CPU interrupts
 */
static __inline uint32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
static __inline uint32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
static __inline uint32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));

static __inline uint32_t
__set_cpsr_c(uint32_t bic, uint32_t eor)
{
	uint32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic	 %1, %0, %2\n"	/* Clear bits */
		"eor	 %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

static __inline uint32_t
disable_interrupts(uint32_t mask)
{
	uint32_t	tmp, ret;
	mask &= (I32_bit | F32_bit);

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"orr	 %1, %0, %2\n"	/* set bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (mask)
	: "memory");

	return ret;
}

static __inline uint32_t
enable_interrupts(uint32_t mask)
{
	uint32_t	ret;
	mask &= (I32_bit | F32_bit);

	/* Get the CPSR */
	__asm __volatile("mrs\t%0, cpsr\n" : "=r"(ret));
#ifdef _ARM_ARCH_6
	if (__builtin_constant_p(mask)) {
		switch (mask) {
		case I32_bit | F32_bit:
			__asm __volatile("cpsie\tif");
			break;
		case I32_bit:
			__asm __volatile("cpsie\ti");
			break;
		case F32_bit:
			__asm __volatile("cpsie\tf");
			break;
		default:
			break;
		}
		return ret;
	}
#endif	/* _ARM_ARCH_6 */

	/* Set the control field of CPSR */
	__asm volatile("msr\tcpsr_c, %0" :: "r"(ret & ~mask));

	return ret;
}

#define	restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
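
/*
 * Usage sketch (illustrative only): a short critical section masks IRQs
 * and afterwards restores whatever mask state was in effect beforehand:
 *
 *	uint32_t s = disable_interrupts(I32_bit);
 *	... critical section ...
 *	restore_interrupts(s);
 *
 * restore_interrupts() puts the saved I/F bits back exactly as they were,
 * so such sections nest safely.
 */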

#define	ENABLE_INTERRUPT()		cpsie(I32_bit)
#define	DISABLE_INTERRUPT()		cpsid(I32_bit)
#define	DISABLE_INTERRUPT_SAVE()	cpsid(I32_bit)

static inline void cpsie(register_t psw) __attribute__((__unused__));
static inline register_t cpsid(register_t psw) __attribute__((__unused__));

static inline void
cpsie(register_t psw)
{
#ifdef _ARM_ARCH_6
	if (!__builtin_constant_p(psw)) {
		enable_interrupts(psw);
		return;
	}
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsie\ti"); break;
	case F32_bit:		__asm("cpsie\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsie\tif"); break;
	}
#else
	enable_interrupts(psw);
#endif
}

static inline register_t
cpsid(register_t psw)
{
#ifdef _ARM_ARCH_6
	register_t oldpsw;
	if (!__builtin_constant_p(psw))
		return disable_interrupts(psw);

	__asm("mrs	%0, cpsr" : "=r"(oldpsw));
	switch (psw & (I32_bit|F32_bit)) {
	case I32_bit:		__asm("cpsid\ti"); break;
	case F32_bit:		__asm("cpsid\tf"); break;
	case I32_bit|F32_bit:	__asm("cpsid\tif"); break;
	}
	return oldpsw;
#else
	return disable_interrupts(psw);
#endif
}


/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);


/*
 * CPU functions from locore.S
 */

void cpu_reset		(void) __dead;

#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
extern u_int arm_cache_prefer_mask;
#endif
extern u_int arm_dcache_align;
extern u_int arm_dcache_align_mask;

extern struct arm_cache_info arm_pcache;
extern struct arm_cache_info arm_scache;

extern uint32_t cpu_ttb;

#endif	/* _KERNEL */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Miscellany
 */

int get_pc_str_offset	(void);

bool cpu_gtmr_exists_p(void);
u_int cpu_clusterid(void);
bool cpu_earlydevice_va_p(void);

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void set_stackptr	(u_int, u_int);
u_int get_stackptr	(u_int);

#endif	/* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpufunc.h>

#endif	/* __arm__/__aarch64__ */

#endif	/* _ARM_CPUFUNC_H_ */

/* End of cpufunc.h */