      1 /*	$NetBSD: locore.h,v 1.40 2025/10/07 10:38:30 skrll Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1994-1996 Mark Brinicombe.
      5  * Copyright (c) 1994 Brini.
      6  * All rights reserved.
      7  *
      8  * This code is derived from software written for Brini by Mark Brinicombe
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by Brini.
     21  * 4. The name of the company nor the name of the author may be used to
     22  *    endorse or promote products derived from this software without specific
     23  *    prior written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
     26  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     27  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     28  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     29  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     30  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     31  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     35  * SUCH DAMAGE.
     36  *
     37  * RiscBSD kernel project
     38  *
     39  * cpu.h
     40  *
     41  * CPU specific symbols
     42  *
     43  * Created      : 18/09/94
     44  *
     45  * Based on kate/katelib/arm6.h
     46  */
     47 
     48 #ifndef _ARM_LOCORE_H_
     49 #define _ARM_LOCORE_H_
     50 
     51 #ifdef __arm__
     52 
     53 #ifdef _KERNEL_OPT
     54 #include "opt_cpuoptions.h"
     55 #include "opt_cputypes.h"
     56 #include "opt_arm_debug.h"
     57 #endif
     58 
     59 #include <sys/pcu.h>
     60 
     61 #include <arm/cpuconf.h>
     62 #include <arm/armreg.h>
     63 
     64 #include <machine/frame.h>
     65 
     66 #ifdef _LOCORE
     67 
/*
 * Assembler macros to mask/unmask interrupts in the CPSR.  The IRQ_*
 * pair touches only the I (IRQ) bit; the INTERRUPT_* pair touches both
 * I (IRQ) and F (FIQ).  On ARMv6+ the CPS instruction changes the mask
 * bits directly and the scratch register rTMP is unused; earlier
 * architectures need a read-modify-write of CPSR via rTMP (which is
 * clobbered).
 */
#if defined(_ARM_ARCH_6)
#define	IRQ_DISABLE(rTMP)	cpsid	i
#define	IRQ_ENABLE(rTMP)	cpsie	i

#define	INTERRUPT_DISABLE(rTMP)	cpsid	if
#define	INTERRUPT_ENABLE(rTMP)	cpsie	if
#else
/* Set I to mask IRQs. */
#define	IRQ_DISABLE(rTMP)				\
	mrs	rTMP, cpsr ;				\
	orr	rTMP, rTMP, #(I32_bit) ;		\
	msr	cpsr_c, rTMP

/* Clear I to unmask IRQs. */
#define	IRQ_ENABLE(rTMP)				\
	mrs	rTMP, cpsr ;				\
	bic	rTMP, rTMP, #(I32_bit) ;		\
	msr	cpsr_c, rTMP

/* Set I and F to mask both IRQs and FIQs. */
#define	INTERRUPT_DISABLE(rTMP)				\
	mrs	rTMP, cpsr ;				\
	orr	rTMP, rTMP, #(I32_bit | F32_bit) ;	\
	msr	cpsr_c, rTMP

/* Clear I and F to unmask both IRQs and FIQs. */
#define	INTERRUPT_ENABLE(rTMP)				\
	mrs	rTMP, cpsr ;				\
	bic	rTMP, rTMP, #(I32_bit | F32_bit) ;	\
	msr	cpsr_c, rTMP
#endif
     95 
/*
 * Assembler macros to fetch the current CPU (rCPU), the current LWP
 * (rLWP), or both.  Depending on kernel options, the TPIDRPRW register
 * (CP15 c13/c0/4) caches either the curcpu or the curlwp pointer and
 * the other is loaded through it; with neither option a uniprocessor
 * kernel falls back to the static cpu_info_store.  (MULTIPROCESSOR
 * without TPIDRPRW is deliberately left undefined, forcing a build
 * error.)
 */
#if defined (TPIDRPRW_IS_CURCPU)
/* TPIDRPRW holds curcpu; curlwp is loaded from ci_curlwp. */
#define	GET_CURCPU(rX)		mrc	p15, 0, rX, c13, c0, 4
#define	GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#define	GET_CURX(rCPU, rLWP)	GET_CURCPU(rCPU); ldr rLWP, [rCPU, #CI_CURLWP]
#elif defined (TPIDRPRW_IS_CURLWP)
/* TPIDRPRW holds curlwp; curcpu is derived from it, or is static on UP. */
#define	GET_CURLWP(rX)		mrc	p15, 0, rX, c13, c0, 4
#if defined (MULTIPROCESSOR)
#define	GET_CURCPU(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
#define	GET_CURX(rCPU, rLWP)	GET_CURLWP(rLWP); ldr rCPU, [rLWP, #L_CPU]
#elif defined(_ARM_ARCH_7)
/* UP v7: materialize &cpu_info_store with a movw/movt pair. */
#define	GET_CURCPU(rX)		movw rX, #:lower16:cpu_info_store; movt rX, #:upper16:cpu_info_store
#define	GET_CURX(rCPU, rLWP)	GET_CURLWP(rLWP); GET_CURCPU(rCPU)
#else
/* UP pre-v7: load &cpu_info_store from the literal pool. */
#define	GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
#define	GET_CURX(rCPU, rLWP)	GET_CURLWP(rLWP); ldr rCPU, [rLWP, #L_CPU]
#endif
#elif !defined(MULTIPROCESSOR)
/* No TPIDRPRW: UP only, everything hangs off cpu_info_store. */
#define	GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
#define	GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#define	GET_CURX(rCPU, rLWP)	GET_CURCPU(rCPU); ldr rLWP, [rCPU, #CI_CURLWP]
#endif
/* Current PCB: curlwp->l_pcb. */
#define	GET_CURPCB(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
    118 
    119 #else /* !_LOCORE */
    120 
    121 #include <arm/cpufunc.h>
    122 
/* Disable/enable IRQs from C by setting/clearing the CPSR I bit. */
#define	IRQdisable __set_cpsr_c(I32_bit, I32_bit);
#define	IRQenable __set_cpsr_c(I32_bit, 0);

/*
 * Validate a PC or PSR for a user process.  Used by various system calls
 * that take a context passed by the user and restore it.
 *
 * The PSR is acceptable only if it selects user (USR32) mode and does
 * not mask IRQs; unless the platform has no FIQs (__NO_FIQ), FIQs must
 * be unmasked as well.
 */

#ifdef __NO_FIQ
#define	VALID_PSR(psr)						\
    (((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & I32_bit) == 0)
#else
#define	VALID_PSR(psr)						\
    (((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & IF32_bits) == 0)
#endif
    138 
/*
 * Translation Table Base Register Share/Cache settings: the attribute
 * bits ORed into the TTBR value.  Presumably the UP/MP suffixes are
 * the uniprocessor and multiprocessor variants — confirm at the use
 * sites before relying on that.
 */
#define	TTBR_UPATTR	(TTBR_S | TTBR_RGN_WBNWA | TTBR_C)
#define	TTBR_MPATTR	(TTBR_S | TTBR_RGN_WBNWA /* | TTBR_NOS */ | TTBR_IRGN_WBNWA)
    144 
    145 /* The address of the vector page. */
    146 extern vaddr_t vector_page;
    147 void	arm32_vector_init(vaddr_t, int);
    148 
/*
 * Bit flags naming the eight entries of the ARM vector page; a mask of
 * these (or ARM_VEC_ALL for every entry) is passed to
 * arm32_vector_init() to select which vectors to install.
 */
#define	ARM_VEC_RESET			(1 << 0)
#define	ARM_VEC_UNDEFINED		(1 << 1)
#define	ARM_VEC_SWI			(1 << 2)
#define	ARM_VEC_PREFETCH_ABORT		(1 << 3)
#define	ARM_VEC_DATA_ABORT		(1 << 4)
#define	ARM_VEC_ADDRESS_EXCEPTION	(1 << 5)
#define	ARM_VEC_IRQ			(1 << 6)
#define	ARM_VEC_FIQ			(1 << 7)

/* Number of vector entries, and the "install everything" mask. */
#define	ARM_NVEC			8
#define	ARM_VEC_ALL			0xffffffff
    160 
    161 /*
    162  * cpu device glue (belongs in cpuvar.h)
    163  */
    164 void	cpu_attach(device_t, cpuid_t);
    165 
    166 /* 1 == use cpu_sleep(), 0 == don't */
    167 extern int cpu_do_powersave;
    168 extern int cpu_printfataltraps;
    169 extern int cpu_fpu_present;
    170 extern int cpu_hwdiv_present;
    171 extern int cpu_neon_present;
    172 extern int cpu_simd_present;
    173 extern int cpu_simdex_present;
    174 extern int cpu_umull_present;
    175 extern int cpu_synchprim_present;
    176 
    177 extern int cpu_instruction_set_attributes[6];
    178 extern int cpu_memory_model_features[4];
    179 extern int cpu_processor_features[2];
    180 extern int cpu_media_and_vfp_features[2];
    181 
    182 extern bool arm_has_tlbiasid_p;
    183 extern bool arm_has_mpext_p;
    184 
/*
 * CPU_IS_ARMV7_P() / CPU_IS_ARMV6_P(): is the CPU we are running on
 * ARMv7 (resp. ARMv6)?  Each collapses to a compile-time constant when
 * the kernel is configured for a single architecture level, and only
 * costs a runtime test (cpu_armv7_p / cpu_armv6_p) when several levels
 * are compiled in together.
 */
#if !defined(CPU_ARMV7)
/* No ARMv7 support configured: statically false. */
#define	CPU_IS_ARMV7_P()		false
#elif defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
/* Mixed-architecture kernel: decided at runtime. */
extern bool cpu_armv7_p;
#define	CPU_IS_ARMV7_P()		(cpu_armv7_p)
#else
/* ARMv7-only kernel: statically true. */
#define	CPU_IS_ARMV7_P()		true
#endif
#if !defined(CPU_ARMV6)
/* No ARMv6 support configured: statically false. */
#define	CPU_IS_ARMV6_P()		false
#elif defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
/* Mixed-architecture kernel: decided at runtime. */
extern bool cpu_armv6_p;
#define	CPU_IS_ARMV6_P()		(cpu_armv6_p)
#else
/* ARMv6-only kernel: statically true. */
#define	CPU_IS_ARMV6_P()		true
#endif
    201 
    202 /*
    203  * Used by the fault code to read the current instruction.
    204  */
    205 static inline uint32_t
    206 read_insn(vaddr_t va, bool user_p)
    207 {
    208 	uint32_t insn;
    209 	if (user_p) {
    210 		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va));
    211 	} else {
    212 		insn = *(const uint32_t *)va;
    213 	}
    214 #ifdef _ARM_ARCH_BE8
    215 	insn = bswap32(insn);
    216 #endif
    217 	return insn;
    218 }
    219 
/*
 * Used by the fault code to read the current (16-bit) Thumb
 * instruction at va.  user_p selects an unprivileged load so that
 * user-mode permissions are honoured even though we are executing in
 * the kernel.
 */
static inline uint32_t
read_thumb_insn(vaddr_t va, bool user_p)
{
	va &= ~1;	/* strip the Thumb bit from the address */
	uint32_t insn;
	if (user_p) {
#if defined(__thumb__) && defined(_ARM_ARCH_T2)
		/* Thumb-2: LDRHT, immediate-offset form. */
		__asm __volatile("ldrht %0, [%1, #0]" : "=&r"(insn) : "r"(va));
#elif defined(_ARM_ARCH_7)
		/* ARM v7: LDRHT, post-indexed form. */
		__asm __volatile("ldrht %0, [%1], #0" : "=&r"(insn) : "r"(va));
#else
		/*
		 * No LDRHT before v7: LDRT the aligned 32-bit word and
		 * extract the wanted halfword, whose position within the
		 * word depends on endianness.
		 */
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va & ~3));
#ifdef __ARMEB__
		insn = (uint16_t) (insn >> (((va ^ 2) & 2) << 3));
#else
		insn = (uint16_t) (insn >> ((va & 2) << 3));
#endif
#endif
	} else {
		/* Kernel address: a plain privileged load will do. */
		insn = *(const uint16_t *)va;
	}
#ifdef _ARM_ARCH_BE8
	/* NOTE(review): BE8 keeps code little-endian; swap to host order. */
	insn = bswap16(insn);
#endif
	return insn;
}
    249 
    250 /*
    251  * Random cruft
    252  */
    253 
    254 struct lwp;
    255 
    256 /* cpu.c */
    257 void	identify_arm_cpu(device_t, struct cpu_info *);
    258 
    259 /* cpuswitch.S */
    260 struct pcb;
    261 void	savectx(struct pcb *);
    262 
    263 /* ast.c */
    264 void	userret(struct lwp *);
    265 
    266 /* *_machdep.c */
    267 void	bootsync(void);
    268 
    269 /* fault.c */
    270 int	badaddr_read(void *, size_t, void *);
    271 
    272 /* syscall.c */
    273 void	swi_handler(trapframe_t *);
    274 
    275 /* vfp_init.c */
    276 void	vfp_detect(struct cpu_info *);
    277 void	vfp_attach(struct cpu_info *);
    278 void	vfp_discardcontext(lwp_t *, bool);
    279 void	vfp_savecontext(lwp_t *);
    280 void	vfp_kernel_acquire(void);
    281 void	vfp_kernel_release(void);
    282 bool	vfp_used_p(const lwp_t *);
    283 extern const pcu_ops_t arm_vfp_ops;
    284 
    285 #endif	/* !_LOCORE */
    286 
    287 #elif defined(__aarch64__)
    288 
    289 #include <aarch64/locore.h>
    290 
    291 #endif /* __arm__/__aarch64__ */
    292 
    293 #endif /* !_ARM_LOCORE_H_ */
    294