/*	cpu.h,v 1.45.4.7 2008/01/28 18:20:39 matt Exp	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions. */
#define	CPU_DEBUG		1	/* int: misc kernel debug control */
#define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define	CPU_CONSDEV		4	/* struct: dev_t of our console */
#define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */
#define	CPU_MAXID		6	/* number of valid machdep ids */
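
/*
 * Example (sketch, userland): the ids above form the second level of a
 * CTL_MACHDEP sysctl(3) request.  Reading the booted kernel path, for
 * instance (error handling omitted for brevity):
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */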

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#ifdef __PROG32
#define	CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)
#else
#define	CLKF_USERMODE(cf) (((cf)->cf_if.if_r15 & R15_MODE) == R15_MODE_USR)
#endif

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#if defined(__PROG32) && !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define	CLKF_INTR(cf) \
	((curcpu()->ci_intr_depth > 1) || \
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define	CLKF_INTR(cf) ((void)(cf), curcpu()->ci_intr_depth > 1)
#endif

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#ifdef __PROG32
#define	CLKF_PC(frame) (frame->cf_tf.tf_pc)
#else
#define	CLKF_PC(frame) (frame->cf_if.if_r15 & R15_PC)
#endif
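
/*
 * Illustration (sketch, loosely modelled on the MI statclock() code):
 * charge a user-mode tick to the interrupted pc, and account time in
 * nested interrupt handlers separately:
 *
 *	if (CLKF_USERMODE(frame))
 *		addupc_intr(curlwp, CLKF_PC(frame));
 *	else if (CLKF_INTR(frame))
 *		curcpu()->ci_data.cpu_nintr++;
 */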

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#ifdef __PROG32
#define	LWP_PC(l) (lwp_trapframe(l)->tf_pc)
#else
#define	LWP_PC(l) (lwp_trapframe(l)->tf_r15 & R15_PC)
#endif
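
/*
 * Example (sketch, hypothetical debugger hook): report where a stopped
 * lwp is executing:
 *
 *	printf("lwp %d stopped at pc %#lx\n",
 *	    l->l_lid, (unsigned long)LWP_PC(l));
 */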

/*
 * Per-CPU information.  A uniprocessor kernel has exactly one of these,
 * cpu_info_store; a MULTIPROCESSOR kernel keeps one per CPU in the
 * cpu_info[] array.
 */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	device_t ci_dev;		/* Device corresponding to this CPU */
	cpuid_t ci_cpuid;
	uint32_t ci_arm_cpuid;		/* aggregate CPU id */
	uint32_t ci_arm_cputype;	/* CPU type */
	uint32_t ci_arm_cpurev;		/* CPU revision */
	uint32_t ci_ctrl;		/* The CPU control register */
	int ci_cpl;			/* current processor level (spl) */
	volatile int ci_astpending;	/* AST pending on this cpu */
	int ci_want_resched;		/* resched() was called */
	int ci_intr_depth;		/* interrupt nesting depth */
	struct cpu_softc *ci_softc;	/* platform softc */
	lwp_t *ci_softlwps[SOFTINT_COUNT];
	volatile uint32_t ci_softints;	/* pending soft interrupts */
	lwp_t *ci_curlwp;		/* current lwp */
	lwp_t *ci_lastlwp;		/* last lwp */
	struct evcnt ci_arm700bugcount;
	int32_t ci_mtx_count;
	int ci_mtx_oldspl;
	register_t ci_undefsave[3];
	uint32_t ci_vfp_id;		/* VFP unit id (FPSID) */
	uint64_t ci_lastintr;
	struct pmap_tlb_info *ci_tlb_info;
	struct pmap *ci_pmap_lastuser;
	struct pmap *ci_pmap_cur;
	tlb_asid_t ci_pmap_asid_cur;
	struct trapframe *ci_ddb_regs;
	struct evcnt ci_abt_evs[16];
	struct evcnt ci_und_ev;
	struct evcnt ci_und_cp15_ev;
	struct evcnt ci_vfp_evs[3];
#if defined(MP_CPU_INFO_MEMBERS)
	MP_CPU_INFO_MEMBERS
#endif
};

extern struct cpu_info cpu_info_store;

struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

#if defined(_MODULE)

#define	curlwp		arm_curlwp()
#define	curcpu()	arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also declared in <sys/lwp.h>; repeated here in case this header was
// included before <sys/lwp.h>.
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curlwp		_curlwp()
// curcpu() expands into two instructions: an mrc and an ldr
#define	curcpu()	lwp_getcpu(_curlwp())
#elif defined(TPIDRPRW_IS_CURCPU)
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
#define	curcpu()	(&cpu_info_store)
#else
#error MULTIPROCESSOR requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */
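
/*
 * Illustration (sketch of the code the TPIDRPRW_IS_CURLWP flavour is
 * expected to generate, assuming ARMv6+; L_CPU here stands for
 * offsetof(struct lwp, l_cpu)):
 *
 *	mrc	p15, 0, r0, c13, c0, 4	@ r0 = curlwp (TPIDRPRW)
 *	ldr	r0, [r0, #L_CPU]	@ r0 = curlwp->l_cpu == curcpu()
 */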

#ifndef curlwp
#define	curlwp		(curcpu()->ci_curlwp)
#endif

#define	CPU_INFO_ITERATOR	int
#if defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define	cpu_number()		(curcpu()->ci_index)
void cpu_boot_secondary_processors(void);
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define	CPU_INFO_FOREACH(cii, ci) \
	cii = 0, ci = cpu_info[0]; cii < ncpu && (ci = cpu_info[cii]) != NULL; cii++
#else
#define	cpu_number()		0

#define	CPU_IS_PRIMARY(ci)	true
#define	CPU_INFO_FOREACH(cii, ci) \
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif
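
/*
 * Typical iteration over all CPUs (sketch):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		printf("cpu%u: cpl %d\n", cpu_index(ci), ci->ci_cpl);
 */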

#define	LWP0_CPU_INFO	(&cpu_info_store)

static inline int
curcpl(void)
{
	return curcpu()->ci_cpl;
}

static inline void
set_curcpl(int pri)
{
	curcpu()->ci_cpl = pri;
}

static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}
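
/*
 * Sketch of the intended use (hypothetical caller; the real one is the
 * spl/interrupt-return code): after lowering the priority level, run
 * any soft interrupts the new level has unmasked:
 *
 *	set_curcpl(newspl);
 *	cpu_dosoftints();
 */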

#ifdef __PROG32
void	cpu_proc_fork(struct proc *, struct proc *);
#else
#define	cpu_proc_fork(p1, p2)
#endif

/*
 * Scheduling glue
 */

#ifdef __HAVE_PREEMPTION
#define	setsoftast() atomic_or_uint(&curcpu()->ci_astpending, \
	    __BIT(0))
#else
#define	setsoftast() (curcpu()->ci_astpending = __BIT(0))
#endif
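
/*
 * The matching consumer is the exception-return path (sketch only; the
 * real test lives in assembly): before returning to user mode the AST
 * flag is fetched and cleared, and ast() is called if it was set:
 *
 *	if (atomic_swap_uint(&ci->ci_astpending, 0) != 0)
 *		ast(tf);
 */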

/*
 * Notify the lwp l that it has a signal pending, to be processed as
 * soon as possible.
 */

#define	cpu_signotify(l)	setsoftast()

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid: request an AST to send us through trap(),
 * marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, setsoftast())

/* for preemption. */
void	cpu_set_curpri(int);

/*
 * We've already preallocated the stacks for the idlelwps of additional
 * CPUs.  This hook allows them to be returned.
 */
vaddr_t	cpu_uarea_alloc_idlelwp(struct cpu_info *);

#ifndef acorn26
/*
 * cpu device glue (belongs in cpuvar.h)
 */
void	cpu_attach(device_t, cpuid_t);
#endif

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#endif /* !_ARM_CPU_H_ */