/*	$NetBSD: locore.h,v 1.30 2018/04/01 04:35:04 ryo Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_LOCORE_H_
#define _ARM_LOCORE_H_

#ifdef __arm__

#ifdef _KERNEL_OPT
#include "opt_cpuoptions.h"
#include "opt_cputypes.h"
#include "opt_arm_debug.h"
#endif

#include <sys/pcu.h>

#include <arm/cpuconf.h>
#include <arm/armreg.h>

#include <machine/frame.h>

#ifdef _LOCORE

#if defined(_ARM_ARCH_6)
#define IRQdisable	cpsid	i
#define IRQenable	cpsie	i
#else
#define IRQdisable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	orr	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}

#define IRQenable \
	stmfd	sp!, {r0} ; \
	mrs	r0, cpsr ; \
	bic	r0, r0, #(I32_bit) ; \
	msr	cpsr_c, r0 ; \
	ldmfd	sp!, {r0}
#endif

#if defined (TPIDRPRW_IS_CURCPU)
#define GET_CURCPU(rX)	mrc	p15, 0, rX, c13, c0, 4
#define GET_CURLWP(rX)	GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#elif defined (TPIDRPRW_IS_CURLWP)
#define GET_CURLWP(rX)	mrc	p15, 0, rX, c13, c0, 4
#if defined (MULTIPROCESSOR)
#define GET_CURCPU(rX)	GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
#elif defined(_ARM_ARCH_7)
#define GET_CURCPU(rX)	movw rX, #:lower16:cpu_info_store; movt rX, #:upper16:cpu_info_store
#else
#define GET_CURCPU(rX)	ldr rX, =_C_LABEL(cpu_info_store)
#endif
#elif !defined(MULTIPROCESSOR)
#define GET_CURCPU(rX)	ldr rX, =_C_LABEL(cpu_info_store)
#define GET_CURLWP(rX)	GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#endif
#define GET_CURPCB(rX)	GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
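
/*
 * Minimal illustrative sketch (not part of the interface above): assembly
 * code built with _LOCORE defined can use these accessors roughly as
 * follows, assuming the usual assym.h offsets (CI_CURLWP, L_CPU, L_PCB)
 * are available:
 *
 *	GET_CURCPU(r4)			@ r4 = this CPU's struct cpu_info
 *	ldr	r5, [r4, #CI_CURLWP]	@ r5 = curlwp
 *	GET_CURPCB(r6)			@ r6 = curlwp's struct pcb
 */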

#else /* !_LOCORE */

#include <arm/cpufunc.h>

#define IRQdisable __set_cpsr_c(I32_bit, I32_bit);
#define IRQenable __set_cpsr_c(I32_bit, 0);

/*
 * Validate a PC or PSR for a user process.  Used by various system calls
 * that take a context passed by the user and restore it.
 */

#ifdef __NO_FIQ
#define VALID_R15_PSR(r15,psr)						\
	(((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & I32_bit) == 0)
#else
#define VALID_R15_PSR(r15,psr)						\
	(((psr) & PSR_MODE) == PSR_USR32_MODE && ((psr) & IF32_bits) == 0)
#endif
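
/*
 * Minimal illustrative sketch (hypothetical caller, not defined here): a
 * system call that restores a user-supplied context is expected to reject
 * a PC/PSR pair that fails the check above before installing it, e.g.
 *
 *	if (!VALID_R15_PSR(gregs[_REG_PC], gregs[_REG_CPSR]))
 *		return EINVAL;
 */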

/*
 * Translation Table Base Register Share/Cache settings
 */
#define	TTBR_UPATTR	(TTBR_S | TTBR_RGN_WBNWA | TTBR_C)
#define	TTBR_MPATTR	(TTBR_S | TTBR_RGN_WBNWA /* | TTBR_NOS */ | TTBR_IRGN_WBNWA)

/* The address of the vector page. */
extern vaddr_t vector_page;
void	arm32_vector_init(vaddr_t, int);

#define	ARM_VEC_RESET			(1 << 0)
#define	ARM_VEC_UNDEFINED		(1 << 1)
#define	ARM_VEC_SWI			(1 << 2)
#define	ARM_VEC_PREFETCH_ABORT		(1 << 3)
#define	ARM_VEC_DATA_ABORT		(1 << 4)
#define	ARM_VEC_ADDRESS_EXCEPTION	(1 << 5)
#define	ARM_VEC_IRQ			(1 << 6)
#define	ARM_VEC_FIQ			(1 << 7)

#define	ARM_NVEC			8
#define	ARM_VEC_ALL			0xffffffff

/*
 * cpu device glue (belongs in cpuvar.h)
 */
void	cpu_attach(device_t, cpuid_t);

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_printfataltraps;
extern int cpu_fpu_present;
extern int cpu_hwdiv_present;
extern int cpu_neon_present;
extern int cpu_simd_present;
extern int cpu_simdex_present;
extern int cpu_umull_present;
extern int cpu_synchprim_present;

extern int cpu_instruction_set_attributes[6];
extern int cpu_memory_model_features[4];
extern int cpu_processor_features[2];
extern int cpu_media_and_vfp_features[2];

extern bool arm_has_tlbiasid_p;
extern bool arm_has_mpext_p;
#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;
extern volatile u_int arm_cpu_hatched;
#endif

#if !defined(CPU_ARMV7)
#define	CPU_IS_ARMV7_P()	false
#elif defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6)
extern bool cpu_armv7_p;
#define	CPU_IS_ARMV7_P()	(cpu_armv7_p)
#else
#define	CPU_IS_ARMV7_P()	true
#endif
#if !defined(CPU_ARMV6)
#define	CPU_IS_ARMV6_P()	false
#elif defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
extern bool cpu_armv6_p;
#define	CPU_IS_ARMV6_P()	(cpu_armv6_p)
#else
#define	CPU_IS_ARMV6_P()	true
#endif

/*
 * Used by the fault code to read the current instruction.
 */
static inline uint32_t
read_insn(vaddr_t va, bool user_p)
{
	uint32_t insn;
	if (user_p) {
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va));
	} else {
		insn = *(const uint32_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	insn = bswap32(insn);
#endif
	return insn;
}

/*
 * Used by the fault code to read the current thumb instruction.
 */
static inline uint32_t
read_thumb_insn(vaddr_t va, bool user_p)
{
	va &= ~1;
	uint32_t insn;
	if (user_p) {
#if defined(__thumb__) && defined(_ARM_ARCH_T2)
		__asm __volatile("ldrht %0, [%1, #0]" : "=&r"(insn) : "r"(va));
#elif defined(_ARM_ARCH_7)
		__asm __volatile("ldrht %0, [%1], #0" : "=&r"(insn) : "r"(va));
#else
		__asm __volatile("ldrt %0, [%1]" : "=&r"(insn) : "r"(va & ~3));
#ifdef __ARMEB__
		insn = (uint16_t) (insn >> (((va ^ 2) & 2) << 3));
#else
		insn = (uint16_t) (insn >> ((va & 2) << 3));
#endif
#endif
	} else {
		insn = *(const uint16_t *)va;
	}
#if defined(__ARMEB__) && defined(_ARM_ARCH_7)
	insn = bswap16(insn);
#endif
	return insn;
}
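
/*
 * Minimal illustrative sketch (trapframe field names assumed, not defined
 * here): a fault handler would typically pick one of the two helpers above
 * based on the Thumb bit in the saved PSR, e.g.
 *
 *	bool user_p = (tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE;
 *	uint32_t insn = (tf->tf_spsr & PSR_T_bit)
 *	    ? read_thumb_insn(tf->tf_pc, user_p)
 *	    : read_insn(tf->tf_pc, user_p);
 */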

#ifndef _RUMPKERNEL
static inline void
arm_dmb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_dmb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("dmb" ::: "memory");
}

static inline void
arm_dsb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_dsb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("dsb" ::: "memory");
}

static inline void
arm_isb(void)
{
	if (CPU_IS_ARMV6_P())
		armreg_isb_write(0);
	else if (CPU_IS_ARMV7_P())
		__asm __volatile("isb" ::: "memory");
}
#endif
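
/*
 * Minimal illustrative sketch: the barrier helpers above enforce ordering
 * between memory accesses, for example making published data visible before
 * the flag that announces it (names here are hypothetical):
 *
 *	shared->data = value;
 *	arm_dmb();		(order the data store before the flag store)
 *	shared->ready = 1;
 */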

/*
 * Random cruft
 */

struct lwp;

/* cpu.c */
void	identify_arm_cpu(device_t, struct cpu_info *);

/* cpuswitch.S */
struct pcb;
void	savectx(struct pcb *);

/* ast.c */
void	userret(struct lwp *);

/* *_machdep.c */
void	bootsync(void);

/* fault.c */
int	badaddr_read(void *, size_t, void *);

/* syscall.c */
void	swi_handler(trapframe_t *);

/* arm_machdep.c */
void	ucas_ras_check(trapframe_t *);

/* vfp_init.c */
void	vfp_attach(struct cpu_info *);
void	vfp_discardcontext(lwp_t *, bool);
void	vfp_savecontext(lwp_t *);
void	vfp_kernel_acquire(void);
void	vfp_kernel_release(void);
bool	vfp_used_p(const lwp_t *);
extern const pcu_ops_t arm_vfp_ops;

#endif /* !_LOCORE */

#elif defined(__aarch64__)

#include <aarch64/locore.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_LOCORE_H_ */