/*	cpu.h,v 1.45.4.7 2008/01/28 18:20:39 matt Exp	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions. */
#define CPU_DEBUG               1   /* int: misc kernel debug control */
#define CPU_BOOTED_DEVICE       2   /* string: device we booted from */
#define CPU_BOOTED_KERNEL       3   /* string: kernel we booted */
#define CPU_CONSDEV             4   /* struct: dev_t of our console */
#define CPU_POWERSAVE           5   /* int: use CPU powersave mode */
#define CPU_MAXID               6   /* number of valid machdep ids */
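
/*
 * Illustrative sketch (not part of this header): the CTL_MACHDEP nodes above
 * are intended to be read from userland with sysctl(3), e.g. (with
 * <sys/param.h>, <sys/sysctl.h> and <err.h>):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_POWERSAVE };
 *	int powersave;
 *	size_t len = sizeof(powersave);
 *
 *	if (sysctl(mib, 2, &powersave, &len, NULL, 0) == -1)
 *		err(1, "sysctl machdep.powersave");
 *
 * CPU_BOOTED_KERNEL and CPU_BOOTED_DEVICE return strings and are queried the
 * same way with a character buffer instead of an int.
 */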

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_LKM) && defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_LKM && _KERNEL_OPT */

#include <arm/cpuconf.h>

#ifndef _LOCORE
#include <machine/frame.h>
#endif /* !_LOCORE */

#include <arm/armreg.h>

#ifndef _LOCORE
/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
#endif

#ifdef _LOCORE

#if defined(_ARM_ARCH_6)
#define IRQdisable      cpsid   i
#define IRQenable       cpsie   i
#elif defined(__PROG32)
#define IRQdisable \
        stmfd   sp!, {r0} ; \
        mrs     r0, cpsr ; \
        orr     r0, r0, #(I32_bit) ; \
        msr     cpsr_c, r0 ; \
        ldmfd   sp!, {r0}

#define IRQenable \
        stmfd   sp!, {r0} ; \
        mrs     r0, cpsr ; \
        bic     r0, r0, #(I32_bit) ; \
        msr     cpsr_c, r0 ; \
        ldmfd   sp!, {r0}
#else
/* Not yet used in 26-bit code */
#endif

#if defined(TPIDRPRW_IS_CURCPU)
#define GET_CURCPU(rX)  mrc p15, 0, rX, c13, c0, 4
#define GET_CURLWP(rX)  GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#elif defined(TPIDRPRW_IS_CURLWP)
#define GET_CURLWP(rX)  mrc p15, 0, rX, c13, c0, 4
#define GET_CURCPU(rX)  GET_CURLWP(rX); ldr rX, [rX, #L_CPU]
#elif !defined(MULTIPROCESSOR)
#define GET_CURCPU(rX)  ldr rX, =_C_LABEL(cpu_info_store)
#define GET_CURLWP(rX)  GET_CURCPU(rX); ldr rX, [rX, #CI_CURLWP]
#endif
#define GET_CURPCB(rX)  GET_CURLWP(rX); ldr rX, [rX, #L_PCB]
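
/*
 * Note (added description): GET_CURCPU/GET_CURLWP/GET_CURPCB load curcpu(),
 * curlwp and curlwp's PCB into the named register from assembly code.  When
 * the ARMv6+ TPIDRPRW thread ID register (cp15 c13, c0, 4) is in use it holds
 * either the cpu_info or the lwp pointer, and the other is reached with one
 * extra load; otherwise the uniprocessor fallback goes through
 * cpu_info_store.  The CI_CURLWP, L_CPU and L_PCB offsets are expected to be
 * provided by assym.h.
 */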

#else /* !_LOCORE */

#ifdef __PROG32
#define IRQdisable __set_cpsr_c(I32_bit, I32_bit);
#define IRQenable __set_cpsr_c(I32_bit, 0);
#else
#define IRQdisable set_r15(R15_IRQ_DISABLE, R15_IRQ_DISABLE);
#define IRQenable set_r15(R15_IRQ_DISABLE, 0);
#endif

#endif /* !_LOCORE */

#ifndef _LOCORE

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#ifdef __PROG32
#define CLKF_USERMODE(frame)    ((frame->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)
#else
#define CLKF_USERMODE(frame)    ((frame->cf_if.if_r15 & R15_MODE) == R15_MODE_USR)
#endif

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#ifdef __PROG32
/* Hack to treat FPE time as interrupt time so we can measure it */
#define CLKF_INTR(frame) \
        ((curcpu()->ci_intr_depth > 1) || \
            (frame->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define CLKF_INTR(frame)        (curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#ifdef __PROG32
#define CLKF_PC(frame)          (frame->cf_tf.tf_pc)
#else
#define CLKF_PC(frame)          (frame->cf_if.if_r15 & R15_PC)
#endif
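
/*
 * Illustrative sketch (not code from this file): machine-independent clock
 * code is the intended consumer of the CLKF_* macros, roughly along the
 * lines of
 *
 *	void
 *	statclock(struct clockframe *frame)
 *	{
 *		if (CLKF_USERMODE(frame))
 *			... charge the tick to user time ...
 *		else if (CLKF_INTR(frame))
 *			... charge the tick to interrupt time ...
 *		else
 *			... charge it to system time, sampling CLKF_PC(frame) ...
 *	}
 *
 * The real statclock() lives in sys/kern/kern_clock.c; the fragment above is
 * only meant to show which question each macro answers.
 */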

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#ifdef __PROG32
#define LWP_PC(l)               (lwp_trapframe(l)->tf_pc)
#else
#define LWP_PC(l)               (lwp_trapframe(l)->tf_r15 & R15_PC)
#endif

/*
 * Validate a PC or PSR for a user process.  Used by various system calls
 * that take a context passed by the user and restore it.
 */

#ifdef __PROG32
#define VALID_R15_PSR(r15,psr) \
        (((psr) & PSR_MODE) == PSR_USR32_MODE && \
            ((psr) & (I32_bit | F32_bit)) == 0)
#else
#define VALID_R15_PSR(r15,psr) \
        (((r15) & R15_MODE) == R15_MODE_USR && \
            ((r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)) == 0)
#endif
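
/*
 * Illustrative sketch (an assumption about the callers, not code from this
 * tree): a sigreturn- or setcontext-style path is expected to reject a
 * user-supplied context whose PSR would re-enter a privileged mode or mask
 * interrupts, e.g.
 *
 *	if (!VALID_R15_PSR(ucp->uc_mcontext.__gregs[_REG_PC],
 *	    ucp->uc_mcontext.__gregs[_REG_CPSR]))
 *		return EINVAL;
 *
 * before copying the saved registers back into the trapframe.
 */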

/* The address of the vector page. */
extern vaddr_t vector_page;
#ifdef __PROG32
void    arm32_vector_init(vaddr_t, int);

#define ARM_VEC_RESET                   (1 << 0)
#define ARM_VEC_UNDEFINED               (1 << 1)
#define ARM_VEC_SWI                     (1 << 2)
#define ARM_VEC_PREFETCH_ABORT          (1 << 3)
#define ARM_VEC_DATA_ABORT              (1 << 4)
#define ARM_VEC_ADDRESS_EXCEPTION       (1 << 5)
#define ARM_VEC_IRQ                     (1 << 6)
#define ARM_VEC_FIQ                     (1 << 7)

#define ARM_NVEC                        8
#define ARM_VEC_ALL                     0xffffffff
#endif
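
/*
 * Illustrative note: platform start-up code calls arm32_vector_init() with
 * the virtual address of the vector page and a mask of the ARM_VEC_* slots
 * it wants installed, typically something like
 *
 *	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 *
 * ARM_VECTORS_HIGH/ARM_VECTORS_LOW come from the arm32 headers; which one is
 * used is platform-dependent.
 */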

/*
 * Per-CPU information.  For now we assume one CPU.
 */
static inline int curcpl(void);
static inline void set_curcpl(int);
#ifdef __HAVE_FAST_SOFTINTS
static inline void cpu_dosoftints(void);
#endif

#include <sys/device_if.h>
#include <sys/evcnt.h>
#include <sys/cpu_data.h>
struct cpu_info {
        struct cpu_data ci_data;        /* MI per-cpu data */
        device_t ci_dev;                /* Device corresponding to this CPU */
        cpuid_t ci_cpuid;
        uint32_t ci_arm_cpuid;          /* aggregate CPU id */
        uint32_t ci_arm_cputype;        /* CPU type */
        uint32_t ci_arm_cpurev;         /* CPU revision */
        uint32_t ci_ctrl;               /* The CPU control register */
        int ci_cpl;                     /* current processor level (spl) */
        int ci_astpending;              /* AST pending for this CPU */
        int ci_want_resched;            /* resched() was called */
        int ci_intr_depth;              /* interrupt nesting depth */
        struct cpu_softc *ci_softc;     /* platform softc */
#ifdef __HAVE_FAST_SOFTINTS
        lwp_t *ci_softlwps[SOFTINT_COUNT];
        volatile uint32_t ci_softints;
#endif
        lwp_t *ci_curlwp;               /* current lwp */
#ifdef _ARM_ARCH_6
        uint32_t ci_ccnt_freq;          /* cycle count frequency */
#endif
        struct evcnt ci_arm700bugcount;
        int32_t ci_mtx_count;
        int ci_mtx_oldspl;
        uint32_t ci_vfp_id;
#ifdef MULTIPROCESSOR
        MP_CPU_INFO_MEMBERS
#endif
};

#ifndef MULTIPROCESSOR
extern struct cpu_info cpu_info_store;
#if defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
        struct lwp *l;
        __asm("mrc\tp15, 0, %0, c13, c0, 4" : "=r"(l));
        return l;
}

static inline void
_curlwp_set(struct lwp *l)
{
        __asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(l));
}

#define curlwp          (_curlwp())
static inline struct cpu_info *
curcpu(void)
{
        return curlwp->l_cpu;
}
#elif defined(TPIDRPRW_IS_CURCPU)
static inline struct cpu_info *
curcpu(void)
{
        struct cpu_info *ci;
        __asm("mrc\tp15, 0, %0, c13, c0, 4" : "=r"(ci));
        return ci;
}
#else
#define curcpu()        (&cpu_info_store)
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */
#ifndef curlwp
#define curlwp          (curcpu()->ci_curlwp)
#endif
#define cpu_number()    0
#define LWP0_CPU_INFO   (&cpu_info_store)
#endif /* !MULTIPROCESSOR */

static inline int
curcpl(void)
{
        return curcpu()->ci_cpl;
}

static inline void
set_curcpl(int pri)
{
        curcpu()->ci_cpl = pri;
}

#ifdef __HAVE_FAST_SOFTINTS
void    dosoftints(void);
static inline void
cpu_dosoftints(void)
{
        struct cpu_info * const ci = curcpu();
        if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
                dosoftints();
}
#endif
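
/*
 * Illustrative sketch (an assumption about the callers, not code from this
 * file): the platform interrupt glue that implements splx() is expected to
 * record the new level with set_curcpl() and then give pending fast soft
 * interrupts a chance to run, roughly
 *
 *	void
 *	example_splx(int newspl)	// hypothetical helper
 *	{
 *		set_curcpl(newspl);	// record the new priority level
 *	#ifdef __HAVE_FAST_SOFTINTS
 *		cpu_dosoftints();	// run softints unmasked by newspl
 *	#endif
 *	}
 *
 * cpu_dosoftints() itself only calls dosoftints() when we are not already
 * inside an interrupt and some softint bit above the current level is set.
 */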

#ifdef __PROG32
void    cpu_proc_fork(struct proc *, struct proc *);
#else
#define cpu_proc_fork(p1, p2)
#endif

/*
 * Scheduling glue
 */

#define setsoftast()    (curcpu()->ci_astpending = 1)

/*
 * Notify the current lwp (l) that it has a signal pending,
 * process as soon as possible.
 */

#define cpu_signotify(l)        setsoftast()

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  We request an AST to send us through trap(),
 * marking the proc as needing a profiling tick.
 */
#define cpu_need_proftick(l)    ((l)->l_pflag |= LP_OWEUPC, setsoftast())

#ifndef acorn26
/*
 * cpu device glue (belongs in cpuvar.h)
 */

struct device;
void    cpu_attach(device_t, cpuid_t);
#endif

/*
 * Random cruft
 */

struct lwp;

/* locore.S */
void    atomic_set_bit(u_int *, u_int);
void    atomic_clear_bit(u_int *, u_int);

/* cpuswitch.S */
struct pcb;
void    savectx(struct pcb *);

/* ast.c */
void    userret(register struct lwp *);

/* *_machdep.c */
void    bootsync(void);

/* fault.c */
int     badaddr_read(void *, size_t, void *);
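
/*
 * Illustrative sketch: badaddr_read() probes an address that may fault,
 * returning 0 and storing the value read on success, non-zero otherwise.
 * A bus probe might use it roughly as follows (the address is hypothetical):
 *
 *	uint32_t id;
 *
 *	if (badaddr_read((void *)dev_va, sizeof(id), &id) != 0)
 *		return 0;	// nothing answered at that address
 *	// otherwise "id" now holds the register contents
 */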

/* syscall.c */
void    swi_handler(trapframe_t *);

/* arm_machdep.c */
void    ucas_ras_check(trapframe_t *);

/* vfp_init.c */
void    vfp_attach(void);
void    vfp_discardcontext(void);
void    vfp_savecontext(void);
extern const pcu_ops_t arm_vfp_ops;

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#endif /* !_ARM_CPU_H_ */