/*	$NetBSD: cpu.h,v 1.122 2021/12/18 16:41:37 riastradh Exp $	*/
2 1.1 reinoud
3 1.1 reinoud /*
4 1.1 reinoud * Copyright (c) 1994-1996 Mark Brinicombe.
5 1.1 reinoud * Copyright (c) 1994 Brini.
6 1.1 reinoud * All rights reserved.
7 1.1 reinoud *
8 1.1 reinoud * This code is derived from software written for Brini by Mark Brinicombe
9 1.1 reinoud *
10 1.1 reinoud * Redistribution and use in source and binary forms, with or without
11 1.1 reinoud * modification, are permitted provided that the following conditions
12 1.1 reinoud * are met:
13 1.1 reinoud * 1. Redistributions of source code must retain the above copyright
14 1.1 reinoud * notice, this list of conditions and the following disclaimer.
15 1.1 reinoud * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 reinoud * notice, this list of conditions and the following disclaimer in the
17 1.1 reinoud * documentation and/or other materials provided with the distribution.
18 1.1 reinoud * 3. All advertising materials mentioning features or use of this software
19 1.1 reinoud * must display the following acknowledgement:
20 1.1 reinoud * This product includes software developed by Brini.
21 1.1 reinoud * 4. The name of the company nor the name of the author may be used to
22 1.1 reinoud * endorse or promote products derived from this software without specific
23 1.1 reinoud * prior written permission.
24 1.1 reinoud *
25 1.1 reinoud * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 1.1 reinoud * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 1.1 reinoud * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 1.1 reinoud * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
29 1.1 reinoud * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30 1.1 reinoud * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
31 1.1 reinoud * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 1.1 reinoud * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 1.1 reinoud * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 1.1 reinoud * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 1.1 reinoud * SUCH DAMAGE.
36 1.1 reinoud *
37 1.1 reinoud * RiscBSD kernel project
38 1.1 reinoud *
39 1.1 reinoud * cpu.h
40 1.1 reinoud *
41 1.1 reinoud * CPU specific symbols
42 1.1 reinoud *
43 1.1 reinoud * Created : 18/09/94
44 1.1 reinoud *
45 1.1 reinoud * Based on kate/katelib/arm6.h
46 1.1 reinoud */
47 1.1 reinoud
48 1.11 bjh21 #ifndef _ARM_CPU_H_
49 1.11 bjh21 #define _ARM_CPU_H_
50 1.1 reinoud
51 1.108 skrll #ifdef _KERNEL
52 1.108 skrll #ifndef _LOCORE
53 1.108 skrll
54 1.108 skrll typedef unsigned long mpidr_t;
55 1.108 skrll
56 1.108 skrll #ifdef MULTIPROCESSOR
57 1.108 skrll extern u_int arm_cpu_max;
58 1.108 skrll extern mpidr_t cpu_mpidr[];
59 1.108 skrll
60 1.120 skrll void cpu_init_secondary_processor(int);
61 1.108 skrll void cpu_boot_secondary_processors(void);
62 1.108 skrll void cpu_mpstart(void);
63 1.108 skrll bool cpu_hatched_p(u_int);
64 1.108 skrll
65 1.108 skrll void cpu_clr_mbox(int);
66 1.108 skrll void cpu_set_hatched(int);
67 1.108 skrll
68 1.108 skrll #endif
69 1.108 skrll
70 1.122 riastrad struct proc;
71 1.122 riastrad
72 1.108 skrll void cpu_proc_fork(struct proc *, struct proc *);
73 1.108 skrll
74 1.108 skrll #endif /* !_LOCORE */
75 1.108 skrll #endif /* _KERNEL */
76 1.108 skrll
77 1.96 ryo #ifdef __arm__
78 1.96 ryo
79 1.8 bjh21 /*
80 1.8 bjh21 * User-visible definitions
81 1.8 bjh21 */
82 1.8 bjh21
83 1.8 bjh21 /* CTL_MACHDEP definitions. */
84 1.8 bjh21 #define CPU_DEBUG 1 /* int: misc kernel debug control */
85 1.8 bjh21 #define CPU_BOOTED_DEVICE 2 /* string: device we booted from */
86 1.8 bjh21 #define CPU_BOOTED_KERNEL 3 /* string: kernel we booted */
87 1.8 bjh21 #define CPU_CONSDEV 4 /* struct: dev_t of our console */
88 1.29 thorpej #define CPU_POWERSAVE 5 /* int: use CPU powersave mode */
89 1.8 bjh21
90 1.63 christos #if defined(_KERNEL) || defined(_KMEMUSER)
91 1.8 bjh21
92 1.8 bjh21 /*
93 1.8 bjh21 * Kernel-only definitions
94 1.8 bjh21 */
95 1.8 bjh21
96 1.76 matt #if !defined(_MODULE) && defined(_KERNEL_OPT)
97 1.119 ryo #include "opt_gprof.h"
98 1.34 martin #include "opt_multiprocessor.h"
99 1.54 matt #include "opt_cpuoptions.h"
100 1.1 reinoud #include "opt_lockdebug.h"
101 1.53 rearnsha #include "opt_cputypes.h"
102 1.76 matt #endif /* !_MODULE && _KERNEL_OPT */
103 1.53 rearnsha
104 1.29 thorpej #ifndef _LOCORE
105 1.79 matt #if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
106 1.79 matt #include <arm/armreg.h>
107 1.109 christos #endif /* TPIDRPRW_IS_CURLWP || TPIDRPRW_IS_CURCPU */
108 1.79 matt
109 1.29 thorpej /* 1 == use cpu_sleep(), 0 == don't */
110 1.29 thorpej extern int cpu_do_powersave;
111 1.75 matt extern int cpu_fpu_present;
112 1.11 bjh21
113 1.8 bjh21 /* All the CLKF_* macros take a struct clockframe * as an argument. */
114 1.8 bjh21
115 1.1 reinoud /*
116 1.11 bjh21 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
117 1.11 bjh21 * frame came from USR mode or not.
118 1.1 reinoud */
119 1.76 matt #define CLKF_USERMODE(cf) (((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)
120 1.1 reinoud
121 1.1 reinoud /*
122 1.11 bjh21 * CLKF_INTR: True if we took the interrupt from inside another
123 1.11 bjh21 * interrupt handler.
124 1.11 bjh21 */
125 1.95 skrll #if !defined(__ARM_EABI__)
126 1.1 reinoud /* Hack to treat FPE time as interrupt time so we can measure it */
127 1.76 matt #define CLKF_INTR(cf) \
128 1.76 matt ((curcpu()->ci_intr_depth > 1) || \
129 1.76 matt ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
130 1.11 bjh21 #else
131 1.91 skrll #define CLKF_INTR(cf) ((void)(cf), curcpu()->ci_intr_depth > 1)
132 1.11 bjh21 #endif
133 1.1 reinoud
134 1.11 bjh21 /*
135 1.11 bjh21 * CLKF_PC: Extract the program counter from a clockframe
136 1.11 bjh21 */
137 1.65 skrll #define CLKF_PC(frame) (frame->cf_tf.tf_pc)
138 1.8 bjh21
139 1.11 bjh21 /*
140 1.33 thorpej * LWP_PC: Find out the program counter for the given lwp.
141 1.11 bjh21 */
142 1.68 matt #define LWP_PC(l) (lwp_trapframe(l)->tf_pc)
143 1.8 bjh21
144 1.40 bjh21 /*
145 1.11 bjh21 * Per-CPU information. For now we assume one CPU.
146 1.1 reinoud */
147 1.85 matt #ifdef _KERNEL
148 1.54 matt static inline int curcpl(void);
149 1.54 matt static inline void set_curcpl(int);
150 1.54 matt static inline void cpu_dosoftints(void);
151 1.85 matt #endif
152 1.1 reinoud
153 1.78 matt #ifdef _KMEMUSER
154 1.78 matt #include <sys/intr.h>
155 1.78 matt #endif
156 1.87 matt #include <sys/atomic.h>
157 1.76 matt #include <sys/cpu_data.h>
158 1.62 uebayasi #include <sys/device_if.h>
159 1.61 uebayasi #include <sys/evcnt.h>
160 1.120 skrll
161 1.114 mrg #include <machine/param.h>
162 1.72 matt
163 1.121 skrll /*
164 1.121 skrll * Cache info variables.
165 1.121 skrll */
166 1.121 skrll #define CACHE_TYPE_VIVT 0
167 1.121 skrll #define CACHE_TYPE_xxPT 1
168 1.121 skrll #define CACHE_TYPE_VIPT 1
169 1.121 skrll #define CACHE_TYPE_PIxx 2
170 1.121 skrll #define CACHE_TYPE_PIPT 3
171 1.121 skrll
172 1.121 skrll /* PRIMARY CACHE VARIABLES */
173 1.121 skrll struct arm_cache_info {
174 1.121 skrll u_int icache_size;
175 1.121 skrll u_int icache_line_size;
176 1.121 skrll u_int icache_ways;
177 1.121 skrll u_int icache_way_size;
178 1.121 skrll u_int icache_sets;
179 1.121 skrll
180 1.121 skrll u_int dcache_size;
181 1.121 skrll u_int dcache_line_size;
182 1.121 skrll u_int dcache_ways;
183 1.121 skrll u_int dcache_way_size;
184 1.121 skrll u_int dcache_sets;
185 1.121 skrll
186 1.121 skrll uint8_t cache_type;
187 1.121 skrll bool cache_unified;
188 1.121 skrll uint8_t icache_type;
189 1.121 skrll uint8_t dcache_type;
190 1.121 skrll };
191 1.121 skrll
192 1.1 reinoud struct cpu_info {
193 1.99 skrll struct cpu_data ci_data; /* MI per-cpu data */
194 1.99 skrll device_t ci_dev; /* Device corresponding to this CPU */
195 1.99 skrll cpuid_t ci_cpuid;
196 1.99 skrll uint32_t ci_arm_cpuid; /* aggregate CPU id */
197 1.99 skrll uint32_t ci_arm_cputype; /* CPU type */
198 1.99 skrll uint32_t ci_arm_cpurev; /* CPU revision */
199 1.99 skrll uint32_t ci_ctrl; /* The CPU control register */
200 1.99 skrll
201 1.112 skrll /*
202 1.112 skrll * the following are in their own cache line, as they are stored to
203 1.112 skrll * regularly by remote CPUs; when they were mixed with other fields
204 1.112 skrll * we observed frequent cache misses.
205 1.112 skrll */
206 1.112 skrll int ci_want_resched __aligned(COHERENCY_UNIT);
207 1.112 skrll /* resched() was called */
208 1.112 skrll lwp_t * ci_curlwp __aligned(COHERENCY_UNIT);
209 1.112 skrll /* current lwp */
210 1.112 skrll lwp_t * ci_onproc; /* current user LWP / kthread */
211 1.112 skrll
212 1.112 skrll /*
213 1.112 skrll * largely CPU-private.
214 1.112 skrll */
215 1.112 skrll lwp_t * ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
216 1.111 riastrad
217 1.99 skrll struct cpu_softc *
218 1.99 skrll ci_softc; /* platform softc */
219 1.99 skrll
220 1.112 skrll int ci_cpl; /* current processor level (spl) */
221 1.116 jmcneill int ci_hwpl; /* current hardware priority */
222 1.112 skrll int ci_kfpu_spl;
223 1.112 skrll
224 1.112 skrll volatile u_int ci_intr_depth; /* */
225 1.112 skrll volatile u_int ci_softints;
226 1.118 skrll volatile uint32_t ci_blocked_pics;
227 1.118 skrll volatile uint32_t ci_pending_pics;
228 1.118 skrll volatile uint32_t ci_pending_ipls;
229 1.99 skrll
230 1.99 skrll lwp_t * ci_lastlwp; /* last lwp */
231 1.99 skrll
232 1.99 skrll struct evcnt ci_arm700bugcount;
233 1.99 skrll int32_t ci_mtx_count;
234 1.99 skrll int ci_mtx_oldspl;
235 1.99 skrll register_t ci_undefsave[3];
236 1.99 skrll uint32_t ci_vfp_id;
237 1.99 skrll uint64_t ci_lastintr;
238 1.99 skrll
239 1.99 skrll struct pmap_tlb_info *
240 1.99 skrll ci_tlb_info;
241 1.99 skrll struct pmap * ci_pmap_lastuser;
242 1.99 skrll struct pmap * ci_pmap_cur;
243 1.99 skrll tlb_asid_t ci_pmap_asid_cur;
244 1.99 skrll
245 1.99 skrll struct trapframe *
246 1.99 skrll ci_ddb_regs;
247 1.99 skrll
248 1.99 skrll struct evcnt ci_abt_evs[16];
249 1.99 skrll struct evcnt ci_und_ev;
250 1.99 skrll struct evcnt ci_und_cp15_ev;
251 1.99 skrll struct evcnt ci_vfp_evs[3];
252 1.99 skrll
253 1.100 skrll uint32_t ci_midr;
254 1.120 skrll uint32_t ci_actlr;
255 1.120 skrll uint32_t ci_revidr;
256 1.100 skrll uint32_t ci_mpidr;
257 1.120 skrll uint32_t ci_mvfr[2];
258 1.120 skrll
259 1.104 mrg uint32_t ci_capacity_dmips_mhz;
260 1.100 skrll
261 1.120 skrll struct arm_cache_info
262 1.100 skrll ci_cacheinfo;
263 1.119 ryo
264 1.119 ryo #if defined(GPROF) && defined(MULTIPROCESSOR)
265 1.119 ryo struct gmonparam *ci_gmon; /* MI per-cpu GPROF */
266 1.119 ryo #endif
267 1.1 reinoud };
268 1.11 bjh21
269 1.108 skrll extern struct cpu_info cpu_info_store[];
270 1.76 matt
271 1.86 matt struct lwp *arm_curlwp(void);
272 1.86 matt struct cpu_info *arm_curcpu(void);
273 1.86 matt
274 1.109 christos #ifdef _KERNEL
275 1.86 matt #if defined(_MODULE)
276 1.86 matt
277 1.86 matt #define curlwp arm_curlwp()
278 1.86 matt #define curcpu() arm_curcpu()
279 1.86 matt
280 1.86 matt #elif defined(TPIDRPRW_IS_CURLWP)
281 1.54 matt static inline struct lwp *
282 1.54 matt _curlwp(void)
283 1.54 matt {
284 1.72 matt return (struct lwp *) armreg_tpidrprw_read();
285 1.54 matt }
286 1.54 matt
287 1.54 matt static inline void
288 1.54 matt _curlwp_set(struct lwp *l)
289 1.54 matt {
290 1.72 matt armreg_tpidrprw_write((uintptr_t)l);
291 1.54 matt }
292 1.54 matt
293 1.91 skrll // Also in <sys/lwp.h> but also here if this was included before <sys/lwp.h>
294 1.85 matt static inline struct cpu_info *lwp_getcpu(struct lwp *);
295 1.85 matt
296 1.85 matt #define curlwp _curlwp()
297 1.85 matt // curcpu() expands into two instructions: a mrc and a ldr
298 1.85 matt #define curcpu() lwp_getcpu(_curlwp())
299 1.69 matt #elif defined(TPIDRPRW_IS_CURCPU)
300 1.88 matt #ifdef __HAVE_PREEMPTION
301 1.88 matt #error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
302 1.88 matt #endif
303 1.54 matt static inline struct cpu_info *
304 1.54 matt curcpu(void)
305 1.54 matt {
306 1.72 matt return (struct cpu_info *) armreg_tpidrprw_read();
307 1.54 matt }
308 1.72 matt #elif !defined(MULTIPROCESSOR)
309 1.108 skrll #define curcpu() (&cpu_info_store[0])
310 1.89 jmcneill #elif !defined(__HAVE_PREEMPTION)
311 1.88 matt #error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
312 1.54 matt #else
313 1.88 matt #error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
314 1.69 matt #endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */
315 1.72 matt
316 1.54 matt #ifndef curlwp
317 1.54 matt #define curlwp (curcpu()->ci_curlwp)
318 1.54 matt #endif
319 1.110 skrll #define curpcb ((struct pcb *)lwp_getpcb(curlwp))
320 1.72 matt
321 1.72 matt #define CPU_INFO_ITERATOR int
322 1.92 skrll #if defined(_MODULE) || defined(MULTIPROCESSOR)
323 1.72 matt extern struct cpu_info *cpu_info[];
324 1.82 matt #define cpu_number() (curcpu()->ci_index)
325 1.82 matt #define CPU_IS_PRIMARY(ci) ((ci)->ci_index == 0)
326 1.72 matt #define CPU_INFO_FOREACH(cii, ci) \
327 1.94 mrg cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
328 1.91 skrll #else
329 1.72 matt #define cpu_number() 0
330 1.72 matt
331 1.72 matt #define CPU_IS_PRIMARY(ci) true
332 1.72 matt #define CPU_INFO_FOREACH(cii, ci) \
333 1.81 christos cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
334 1.72 matt #endif
335 1.72 matt
336 1.108 skrll #define LWP0_CPU_INFO (&cpu_info_store[0])
337 1.54 matt
338 1.54 matt static inline int
339 1.54 matt curcpl(void)
340 1.54 matt {
341 1.54 matt return curcpu()->ci_cpl;
342 1.54 matt }
343 1.54 matt
344 1.54 matt static inline void
345 1.54 matt set_curcpl(int pri)
346 1.54 matt {
347 1.54 matt curcpu()->ci_cpl = pri;
348 1.54 matt }
349 1.54 matt
350 1.54 matt static inline void
351 1.54 matt cpu_dosoftints(void)
352 1.54 matt {
353 1.72 matt #ifdef __HAVE_FAST_SOFTINTS
354 1.72 matt void dosoftints(void);
355 1.72 matt #ifndef __HAVE_PIC_FAST_SOFTINTS
356 1.56 matt struct cpu_info * const ci = curcpu();
357 1.56 matt if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
358 1.54 matt dosoftints();
359 1.72 matt #endif
360 1.72 matt #endif
361 1.54 matt }
362 1.11 bjh21
363 1.11 bjh21 /*
364 1.11 bjh21 * Scheduling glue
365 1.11 bjh21 */
366 1.112 skrll void cpu_signotify(struct lwp *);
367 1.112 skrll #define setsoftast(ci) (cpu_signotify((ci)->ci_onproc))
368 1.1 reinoud
369 1.1 reinoud /*
370 1.1 reinoud * Give a profiling tick to the current process when the user profiling
371 1.1 reinoud * buffer pages are invalid. On the i386, request an ast to send us
372 1.1 reinoud * through trap(), marking the proc as needing a profiling tick.
373 1.1 reinoud */
374 1.90 matt #define cpu_need_proftick(l) ((l)->l_pflag |= LP_OWEUPC, \
375 1.112 skrll setsoftast(lwp_getcpu(l)))
376 1.1 reinoud
377 1.72 matt /*
378 1.91 skrll * We've already preallocated the stack for the idlelwps for additional CPUs.
379 1.72 matt * This hook allows to return them.
380 1.72 matt */
381 1.72 matt vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);
382 1.72 matt
383 1.107 skrll #ifdef _ARM_ARCH_6
384 1.105 skrll int cpu_maxproc_hook(int);
385 1.105 skrll #endif
386 1.105 skrll
387 1.109 christos #endif /* _KERNEL */
388 1.109 christos
389 1.76 matt #endif /* !_LOCORE */
390 1.1 reinoud
391 1.109 christos #endif /* _KERNEL || _KMEMUSER */
392 1.1 reinoud
393 1.96 ryo #elif defined(__aarch64__)
394 1.96 ryo
395 1.96 ryo #include <aarch64/cpu.h>
396 1.96 ryo
397 1.96 ryo #endif /* __arm__/__aarch64__ */
398 1.96 ryo
399 1.11 bjh21 #endif /* !_ARM_CPU_H_ */
400