/* Source-browser residue (non-code): arm/include/locore.h, revision 1.16 */
      1  1.16   matt /*	$NetBSD: locore.h,v 1.16 2014/03/28 21:39:09 matt Exp $	*/
      2   1.1   matt 
      3   1.1   matt /*
      4   1.1   matt  * Copyright (c) 1994-1996 Mark Brinicombe.
      5   1.1   matt  * Copyright (c) 1994 Brini.
      6   1.1   matt  * All rights reserved.
      7   1.1   matt  *
      8   1.1   matt  * This code is derived from software written for Brini by Mark Brinicombe
      9   1.1   matt  *
     10   1.1   matt  * Redistribution and use in source and binary forms, with or without
     11   1.1   matt  * modification, are permitted provided that the following conditions
     12   1.1   matt  * are met:
     13   1.1   matt  * 1. Redistributions of source code must retain the above copyright
     14   1.1   matt  *    notice, this list of conditions and the following disclaimer.
     15   1.1   matt  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1   matt  *    notice, this list of conditions and the following disclaimer in the
     17   1.1   matt  *    documentation and/or other materials provided with the distribution.
     18   1.1   matt  * 3. All advertising materials mentioning features or use of this software
     19   1.1   matt  *    must display the following acknowledgement:
     20   1.1   matt  *	This product includes software developed by Brini.
     21   1.1   matt  * 4. The name of the company nor the name of the author may be used to
     22   1.1   matt  *    endorse or promote products derived from this software without specific
     23   1.1   matt  *    prior written permission.
     24   1.1   matt  *
     25   1.1   matt  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
     26   1.1   matt  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     27   1.1   matt  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     28   1.1   matt  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     29   1.1   matt  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     30   1.1   matt  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     31   1.1   matt  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     32   1.1   matt  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     33   1.1   matt  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     34   1.1   matt  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     35   1.1   matt  * SUCH DAMAGE.
     36   1.1   matt  *
     37   1.1   matt  * RiscBSD kernel project
     38   1.1   matt  *
     39   1.1   matt  * cpu.h
     40   1.1   matt  *
     41   1.1   matt  * CPU specific symbols
     42   1.1   matt  *
     43   1.1   matt  * Created      : 18/09/94
     44   1.1   matt  *
     45   1.1   matt  * Based on kate/katelib/arm6.h
     46   1.1   matt  */
     47   1.1   matt 
     48   1.1   matt #ifndef _ARM_LOCORE_H_
     49   1.1   matt #define _ARM_LOCORE_H_
     50   1.1   matt 
     51   1.2   matt #ifdef _KERNEL_OPT
     52   1.2   matt #include "opt_cpuoptions.h"
     53   1.2   matt #include "opt_cputypes.h"
     54  1.15   matt #include "opt_arm_debug.h"
     55   1.2   matt #endif
     56   1.2   matt 
     57   1.1   matt #include <arm/cpuconf.h>
     58   1.1   matt #include <arm/armreg.h>
     59   1.1   matt 
     60   1.1   matt #include <machine/frame.h>
     61   1.1   matt 
     62   1.1   matt #ifdef _LOCORE
     63   1.1   matt 
#if defined(_ARM_ARCH_6)
/* ARMv6 and later have dedicated interrupt-mask instructions. */
#define IRQdisable	cpsid	i
#define IRQenable	cpsie	i
#elif defined(__PROG32)
/*
 * Pre-v6 32-bit mode: read-modify-write the I bit in the CPSR.
 * r0 is saved/restored on the stack because these macros may be
 * expanded anywhere, with any register live.
 */
#define IRQdisable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	orr	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}

#define IRQenable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	bic	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}
#else
/* Not yet used in 26-bit code */
#endif
     84   1.1   matt 
/*
 * Assembly accessors for the current CPU / LWP / PCB.  When TPIDRPRW
 * (cp15 c13, c0, 4 — the privileged thread-ID register) is dedicated
 * to curcpu or curlwp, read it directly and chase the appropriate
 * pointer for the other; on a uniprocessor kernel without TPIDRPRW,
 * fall back to the static cpu_info_store.  CI_CURLWP, L_CPU and L_PCB
 * are presumably assym-generated structure offsets — defined elsewhere.
 */
#if defined (TPIDRPRW_IS_CURCPU)
#define GET_CURCPU(rX)		mrc	p15, 0, rX, c13, c0, 4
#define GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#elif defined (TPIDRPRW_IS_CURLWP)
#define GET_CURLWP(rX)		mrc	p15, 0, rX, c13, c0, 4
#define GET_CURCPU(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
#elif !defined(MULTIPROCESSOR)
#define GET_CURCPU(rX)		ldr rX, =_C_LABEL(cpu_info_store)
#define GET_CURLWP(rX)		GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#endif
#define GET_CURPCB(rX)		GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
     96   1.1   matt 
     97   1.1   matt #else /* !_LOCORE */
     98   1.1   matt 
     99   1.3   matt #include <arm/cpufunc.h>
    100   1.3   matt 
    101   1.1   matt #ifdef __PROG32
    102   1.1   matt #define IRQdisable __set_cpsr_c(I32_bit, I32_bit);
    103   1.1   matt #define IRQenable __set_cpsr_c(I32_bit, 0);
    104   1.1   matt #else
    105   1.1   matt #define IRQdisable set_r15(R15_IRQ_DISABLE, R15_IRQ_DISABLE);
    106   1.1   matt #define IRQenable set_r15(R15_IRQ_DISABLE, 0);
    107   1.1   matt #endif
    108   1.1   matt 
    109   1.1   matt /*
    110   1.1   matt  * Validate a PC or PSR for a user process.  Used by various system calls
    111   1.1   matt  * that take a context passed by the user and restore it.
    112   1.1   matt  */
    113   1.1   matt 
    114   1.1   matt #ifdef __PROG32
    115   1.1   matt #define VALID_R15_PSR(r15,psr)						\
    116   1.1   matt 	(((psr) & PSR_MODE) == PSR_USR32_MODE &&			\
    117   1.1   matt 		((psr) & (I32_bit | F32_bit)) == 0)
    118   1.1   matt #else
    119   1.1   matt #define VALID_R15_PSR(r15,psr)						\
    120   1.1   matt 	(((r15) & R15_MODE) == R15_MODE_USR &&				\
    121   1.1   matt 		((r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)) == 0)
    122   1.1   matt #endif
    123   1.1   matt 
    124   1.1   matt 
    125   1.1   matt 
    126   1.1   matt /* The address of the vector page. */
    127   1.1   matt extern vaddr_t vector_page;
    128   1.1   matt #ifdef __PROG32
    129   1.1   matt void	arm32_vector_init(vaddr_t, int);
    130   1.1   matt 
    131   1.1   matt #define	ARM_VEC_RESET			(1 << 0)
    132   1.1   matt #define	ARM_VEC_UNDEFINED		(1 << 1)
    133   1.1   matt #define	ARM_VEC_SWI			(1 << 2)
    134   1.1   matt #define	ARM_VEC_PREFETCH_ABORT		(1 << 3)
    135   1.1   matt #define	ARM_VEC_DATA_ABORT		(1 << 4)
    136   1.1   matt #define	ARM_VEC_ADDRESS_EXCEPTION	(1 << 5)
    137   1.1   matt #define	ARM_VEC_IRQ			(1 << 6)
    138   1.1   matt #define	ARM_VEC_FIQ			(1 << 7)
    139   1.1   matt 
    140   1.1   matt #define	ARM_NVEC			8
    141   1.1   matt #define	ARM_VEC_ALL			0xffffffff
    142   1.1   matt #endif /* __PROG32 */
    143   1.1   matt 
    144   1.1   matt #ifndef acorn26
    145   1.1   matt /*
    146   1.1   matt  * cpu device glue (belongs in cpuvar.h)
    147   1.1   matt  */
    148   1.1   matt void	cpu_attach(device_t, cpuid_t);
    149   1.1   matt #endif
    150   1.1   matt 
    151   1.2   matt /* 1 == use cpu_sleep(), 0 == don't */
    152  1.14   matt extern int cpu_do_powersave;
    153   1.7   matt extern int cpu_printfataltraps;
    154   1.2   matt extern int cpu_fpu_present;
    155   1.5   matt extern int cpu_hwdiv_present;
    156  1.14   matt extern int cpu_neon_present;
    157  1.14   matt extern int cpu_simd_present;
    158  1.14   matt extern int cpu_simdex_present;
    159  1.14   matt extern int cpu_umull_present;
    160  1.14   matt extern int cpu_synchprim_present;
    161  1.14   matt 
    162  1.14   matt extern int cpu_instruction_set_attributes[6];
    163  1.14   matt extern int cpu_memory_model_features[4];
    164  1.14   matt extern int cpu_processor_features[2];
    165  1.14   matt extern int cpu_media_and_vfp_features[2];
    166   1.2   matt 
    167  1.16   matt extern bool arm_has_tlbiasid_p;
    168  1.16   matt #ifdef MULTIPROCESSOR
    169  1.13   matt extern u_int arm_cpu_max;
    170  1.16   matt extern volatile u_int arm_cpu_hatched;
    171  1.16   matt #endif
    172  1.13   matt 
    173   1.2   matt #if !defined(CPU_ARMV7)
    174   1.2   matt #define	CPU_IS_ARMV7_P()		false
    175   1.2   matt #elif defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
    176   1.2   matt extern bool cpu_armv7_p;
    177   1.2   matt #define	CPU_IS_ARMV7_P()		(cpu_armv7_p)
    178   1.2   matt #else
    179   1.2   matt #define	CPU_IS_ARMV7_P()		true
    180   1.2   matt #endif
    181   1.6   matt #if !defined(CPU_ARMV6)
    182   1.6   matt #define	CPU_IS_ARMV6_P()		false
    183   1.6   matt #elif defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
    184   1.6   matt extern bool cpu_armv6_p;
    185   1.6   matt #define	CPU_IS_ARMV6_P()		(cpu_armv6_p)
    186   1.6   matt #else
    187   1.6   matt #define	CPU_IS_ARMV6_P()		true
    188   1.6   matt #endif
    189   1.6   matt 
/*
 * Used by the fault code to read the current instruction.
 *
 * va:     virtual address of the (32-bit ARM) instruction.
 * user_p: true when the fault came from user mode; an unprivileged
 *         load (ldrt) is then used so the access is checked against
 *         user permissions rather than the kernel's.
 */
static inline uint32_t
read_insn(vaddr_t va, bool user_p)
{
	uint32_t insn;
	if (user_p) {
		/* ldrt: load word with user-mode access permissions. */
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va));
	} else {
		insn = *(const uint32_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	/*
	 * ARMv7 big-endian (BE8): instructions are stored little-endian
	 * in memory, so swap to get host byte order.
	 */
	insn = bswap32(insn);
#endif
	return insn;
}
    207   1.8   matt 
/*
 * Used by the fault code to read the current thumb instruction.
 *
 * Returns one 16-bit Thumb instruction unit, zero-extended to 32 bits.
 * user_p selects an unprivileged load so user permissions are honoured.
 */
static inline uint32_t
read_thumb_insn(vaddr_t va, bool user_p)
{
	va &= ~1;	/* strip the Thumb state bit from the address */
	uint32_t insn;
	if (user_p) {
#ifdef _ARM_ARCH_T2
		/* ldrht: unprivileged halfword load (Thumb-2 capable CPUs). */
		__asm __volatile("ldrht %0, [%1], #0" : "=&r"(insn) : "r"(va));
#else
		/*
		 * No unprivileged halfword load available: do a word-aligned
		 * ldrt and shift the wanted halfword down.  Which halfword
		 * lane holds it depends on endianness.
		 */
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va & ~3));
#ifdef __ARMEB__
		insn = (uint16_t) (insn >> (((va ^ 2) & 2) << 3));
#else
		insn = (uint16_t) (insn >> ((va & 2) << 3));
#endif
#endif
	} else {
		insn = *(const uint16_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	/* BE8 (v7 big-endian): code is little-endian in memory — swap. */
	insn = bswap16(insn);
#endif
	return insn;
}
    235   1.2   matt 
/*
 * Data Memory Barrier: order memory accesses around this point.
 * ARMv6 uses the CP15 barrier operation; ARMv7 has a dedicated
 * instruction; on anything older this is a no-op.
 */
static inline void
arm_dmb(void)
{
	if (CPU_IS_ARMV6_P()) {
		armreg_dmb_write(0);
		return;
	}
	if (CPU_IS_ARMV7_P()) {
		__asm __volatile("dmb");
	}
}
    244  1.16   matt 
/*
 * Data Synchronization Barrier: wait for all outstanding memory
 * accesses to complete.  CP15 op on ARMv6, dedicated instruction on
 * ARMv7, no-op on earlier CPUs.
 */
static inline void
arm_dsb(void)
{
	if (CPU_IS_ARMV6_P()) {
		armreg_dsb_write(0);
		return;
	}
	if (CPU_IS_ARMV7_P()) {
		__asm __volatile("dsb");
	}
}
    253  1.16   matt 
/*
 * Instruction Synchronization Barrier: flush the pipeline so that
 * following instructions are refetched.  CP15 op on ARMv6, dedicated
 * instruction on ARMv7, no-op on earlier CPUs.
 */
static inline void
arm_isb(void)
{
	if (CPU_IS_ARMV6_P()) {
		armreg_isb_write(0);
		return;
	}
	if (CPU_IS_ARMV7_P()) {
		__asm __volatile("isb");
	}
}
    262  1.16   matt 
    263   1.1   matt /*
    264   1.1   matt  * Random cruft
    265   1.1   matt  */
    266   1.1   matt 
    267   1.1   matt struct lwp;
    268   1.1   matt 
    269  1.16   matt /* cpu.c */
    270  1.16   matt void	identify_arm_cpu(device_t, struct cpu_info *);
    271   1.1   matt 
    272   1.1   matt /* cpuswitch.S */
    273   1.1   matt struct pcb;
    274   1.1   matt void	savectx(struct pcb *);
    275   1.1   matt 
    276   1.1   matt /* ast.c */
    277   1.1   matt void	userret(struct lwp *);
    278   1.1   matt 
    279   1.1   matt /* *_machdep.c */
    280   1.1   matt void	bootsync(void);
    281   1.1   matt 
    282   1.1   matt /* fault.c */
    283   1.1   matt int	badaddr_read(void *, size_t, void *);
    284   1.1   matt 
    285   1.1   matt /* syscall.c */
    286   1.1   matt void	swi_handler(trapframe_t *);
    287   1.1   matt 
    288   1.1   matt /* arm_machdep.c */
    289   1.1   matt void	ucas_ras_check(trapframe_t *);
    290   1.1   matt 
    291   1.1   matt /* vfp_init.c */
    292  1.16   matt void	vfp_attach(struct cpu_info *);
    293   1.4   matt void	vfp_discardcontext(bool);
    294   1.1   matt void	vfp_savecontext(void);
    295   1.1   matt void	vfp_kernel_acquire(void);
    296   1.1   matt void	vfp_kernel_release(void);
    297   1.4   matt bool	vfp_used_p(void);
    298   1.1   matt extern const pcu_ops_t arm_vfp_ops;
    299   1.1   matt 
    300   1.1   matt #endif	/* !_LOCORE */
    301   1.1   matt 
    302   1.1   matt #endif /* !_ARM_LOCORE_H_ */
    303