/*	$NetBSD: locore.h,v 1.29 2018/01/24 09:04:45 skrll Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_LOCORE_H_
#define _ARM_LOCORE_H_

#ifdef _KERNEL_OPT
#include "opt_cpuoptions.h"
#include "opt_cputypes.h"
#include "opt_arm_debug.h"
#endif

#include <sys/pcu.h>

#include <arm/cpuconf.h>
#include <arm/armreg.h>

#include <machine/frame.h>

#ifdef _LOCORE

#if defined(_ARM_ARCH_6)
#define IRQdisable	cpsid	i
#define IRQenable	cpsie	i
#else
#define IRQdisable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	orr	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}

#define IRQenable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	bic	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}
#endif

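/*
 * Illustrative sketch only (not part of the original macros): assembly
 * code brackets a short critical section with these macros, e.g.
 *
 *	IRQdisable
 *	@ ... code that must not be interrupted by IRQs ...
 *	IRQenable
 */
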
#if defined (TPIDRPRW_IS_CURCPU)
#define GET_CURCPU(rX)		mrc	p15, 0, rX, c13, c0, 4
#define GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#elif defined (TPIDRPRW_IS_CURLWP)
#define GET_CURLWP(rX)		mrc	p15, 0, rX, c13, c0, 4
#if defined (MULTIPROCESSOR)
#define GET_CURCPU(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
#elif defined(_ARM_ARCH_7)
#define GET_CURCPU(rX)		movw rX, #:lower16:cpu_info_store; movt rX, #:upper16:cpu_info_store
#else
#define GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
#endif
#elif !defined(MULTIPROCESSOR)
#define GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
#define GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#endif
#define GET_CURPCB(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
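
/*
 * Illustrative sketch only: assembly code fetches the current CPU or
 * LWP into a scratch register with these macros, e.g.
 *
 *	GET_CURCPU(r4)			@ r4 = this CPU's cpu_info
 *	ldr	r5, [r4, #CI_CURLWP]	@ r5 = curlwp
 */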

#else /* !_LOCORE */

#include <arm/cpufunc.h>

#define IRQdisable __set_cpsr_c(I32_bit, I32_bit);
#define IRQenable __set_cpsr_c(I32_bit, 0);

/*
 * Validate a PC or PSR for a user process.  Used by various system calls
 * that take a context passed by the user and restore it.
 */

#ifdef __NO_FIQ
#define VALID_R15_PSR(r15,psr)						\
	(((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & I32_bit) == 0)
#else
#define VALID_R15_PSR(r15,psr)						\
	(((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & IF32_bits) == 0)
#endif
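
/*
 * Illustrative sketch only: code restoring a user-supplied context
 * (a hypothetical trapframe pointer "tf") might reject a bogus frame
 * with
 *
 *	if (!VALID_R15_PSR(tf->tf_pc, tf->tf_spsr))
 *		return EINVAL;
 */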

/*
 * Translation Table Base Register Share/Cache settings
 */
#define	TTBR_UPATTR	(TTBR_S | TTBR_RGN_WBNWA | TTBR_C)
#define	TTBR_MPATTR	(TTBR_S | TTBR_RGN_WBNWA /* | TTBR_NOS */ | TTBR_IRGN_WBNWA)

/* The address of the vector page. */
extern vaddr_t vector_page;
void	arm32_vector_init(vaddr_t, int);

#define	ARM_VEC_RESET			(1 << 0)
#define	ARM_VEC_UNDEFINED		(1 << 1)
#define	ARM_VEC_SWI			(1 << 2)
#define	ARM_VEC_PREFETCH_ABORT		(1 << 3)
#define	ARM_VEC_DATA_ABORT		(1 << 4)
#define	ARM_VEC_ADDRESS_EXCEPTION	(1 << 5)
#define	ARM_VEC_IRQ			(1 << 6)
#define	ARM_VEC_FIQ			(1 << 7)

#define	ARM_NVEC			8
#define	ARM_VEC_ALL			0xffffffff

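/*
 * Illustrative only: board startup code typically installs handlers for
 * all vectors in one call, e.g.
 *
 *	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 *
 * (ARM_VECTORS_HIGH is defined elsewhere; an explicit vector page
 * address may be passed instead.)
 */
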
/*
 * cpu device glue (belongs in cpuvar.h)
 */
void	cpu_attach(device_t, cpuid_t);

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_printfataltraps;
extern int cpu_fpu_present;
extern int cpu_hwdiv_present;
extern int cpu_neon_present;
extern int cpu_simd_present;
extern int cpu_simdex_present;
extern int cpu_umull_present;
extern int cpu_synchprim_present;

extern int cpu_instruction_set_attributes[6];
extern int cpu_memory_model_features[4];
extern int cpu_processor_features[2];
extern int cpu_media_and_vfp_features[2];

extern bool arm_has_tlbiasid_p;
extern bool arm_has_mpext_p;
#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;
extern volatile u_int arm_cpu_hatched;
#endif

#if !defined(CPU_ARMV7)
#define	CPU_IS_ARMV7_P()		false
#elif defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
extern bool cpu_armv7_p;
#define	CPU_IS_ARMV7_P()		(cpu_armv7_p)
#else
#define	CPU_IS_ARMV7_P()		true
#endif
#if !defined(CPU_ARMV6)
#define	CPU_IS_ARMV6_P()		false
#elif defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
extern bool cpu_armv6_p;
#define	CPU_IS_ARMV6_P()		(cpu_armv6_p)
#else
#define	CPU_IS_ARMV6_P()		true
#endif

/*
 * Used by the fault code to read the current instruction.
 */
static inline uint32_t
read_insn(vaddr_t va, bool user_p)
{
	uint32_t insn;
	if (user_p) {
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va));
	} else {
		insn = *(const uint32_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	insn = bswap32(insn);
#endif
	return insn;
}

/*
 * Used by the fault code to read the current thumb instruction.
 */
static inline uint32_t
read_thumb_insn(vaddr_t va, bool user_p)
{
	va &= ~1;
	uint32_t insn;
	if (user_p) {
#if defined(__thumb__) && defined(_ARM_ARCH_T2)
		__asm __volatile("ldrht %0, [%1, #0]" : "=&r"(insn) : "r"(va));
#elif defined(_ARM_ARCH_7)
		__asm __volatile("ldrht %0, [%1], #0" : "=&r"(insn) : "r"(va));
#else
		/*
		 * No unprivileged halfword load is available here, so do
		 * a word-sized LDRT from the containing aligned word and
		 * extract the halfword that va refers to (the byte lanes
		 * are swapped on big-endian).
		 */
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va & ~3));
#ifdef __ARMEB__
		insn = (uint16_t) (insn >> (((va ^ 2) & 2) << 3));
#else
		insn = (uint16_t) (insn >> ((va & 2) << 3));
#endif
#endif
	} else {
		insn = *(const uint16_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	insn = bswap16(insn);
#endif
	return insn;
}

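/*
 * Illustrative sketch only: a fault handler would typically choose the
 * reader based on the Thumb bit in the saved PSR, e.g. (assuming a
 * trapframe pointer "tf")
 *
 *	insn = (tf->tf_spsr & PSR_T_bit) ?
 *	    read_thumb_insn(tf->tf_pc, user_p) :
 *	    read_insn(tf->tf_pc, user_p);
 */
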
#ifndef _RUMPKERNEL
static inline void
arm_dmb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_dmb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("dmb" ::: "memory");
}

static inline void
arm_dsb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_dsb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("dsb" ::: "memory");
}

static inline void
arm_isb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_isb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("isb" ::: "memory");
}
#endif

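/*
 * Illustrative only (hypothetical "buf"/"readyp" variables): a writer
 * publishing data for another observer can order its stores with a
 * data memory barrier, e.g.
 *
 *	buf[idx] = value;
 *	arm_dmb();
 *	*readyp = 1;
 */
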
/*
 * Random cruft
 */

struct lwp;

/* cpu.c */
void	identify_arm_cpu(device_t, struct cpu_info *);

/* cpuswitch.S */
struct pcb;
void	savectx(struct pcb *);

/* ast.c */
void	userret(struct lwp *);

/* *_machdep.c */
void	bootsync(void);

/* fault.c */
int	badaddr_read(void *, size_t, void *);

/* syscall.c */
void	swi_handler(trapframe_t *);

/* arm_machdep.c */
void	ucas_ras_check(trapframe_t *);

/* vfp_init.c */
void	vfp_attach(struct cpu_info *);
void	vfp_discardcontext(lwp_t *, bool);
void	vfp_savecontext(lwp_t *);
void	vfp_kernel_acquire(void);
void	vfp_kernel_release(void);
bool	vfp_used_p(const lwp_t *);
extern const pcu_ops_t arm_vfp_ops;

#endif	/* !_LOCORE */

#endif /* !_ARM_LOCORE_H_ */