/*	$NetBSD: cpu.h,v 1.125 2023/07/11 11:01:18 riastradh Exp $	*/

/*
 * Copyright (c) 1994-1996 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.h
 *
 * CPU specific symbols
 *
 * Created      : 18/09/94
 *
 * Based on kate/katelib/arm6.h
 */

#ifndef _ARM_CPU_H_
#define _ARM_CPU_H_

#ifdef _KERNEL
#ifndef _LOCORE

typedef unsigned long mpidr_t;

#ifdef MULTIPROCESSOR
extern u_int arm_cpu_max;
extern mpidr_t cpu_mpidr[];

void cpu_init_secondary_processor(int);
void cpu_boot_secondary_processors(void);
void cpu_mpstart(void);
bool cpu_hatched_p(u_int);

void cpu_clr_mbox(int);
void cpu_set_hatched(int);

#endif

struct proc;

void	cpu_proc_fork(struct proc *, struct proc *);

#endif /* !_LOCORE */
#endif /* _KERNEL */

#ifdef __arm__

/*
 * User-visible definitions
 */

/* CTL_MACHDEP definitions. */
#define	CPU_DEBUG		1	/* int: misc kernel debug control */
#define	CPU_BOOTED_DEVICE	2	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	3	/* string: kernel we booted */
#define	CPU_CONSDEV		4	/* struct: dev_t of our console */
#define	CPU_POWERSAVE		5	/* int: use CPU powersave mode */

#if defined(_KERNEL) || defined(_KMEMUSER)

/*
 * Kernel-only definitions
 */

#if !defined(_MODULE) && defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"
#include "opt_cputypes.h"
#endif /* !_MODULE && _KERNEL_OPT */

#ifndef _LOCORE
#if defined(TPIDRPRW_IS_CURLWP) || defined(TPIDRPRW_IS_CURCPU)
#include <arm/armreg.h>
#endif /* TPIDRPRW_IS_CURLWP || TPIDRPRW_IS_CURCPU */

/* 1 == use cpu_sleep(), 0 == don't */
extern int cpu_do_powersave;
extern int cpu_fpu_present;

/* All the CLKF_* macros take a struct clockframe * as an argument. */

/*
 * CLKF_USERMODE: Return TRUE/FALSE (1/0) depending on whether the
 * frame came from USR mode or not.
 */
#define	CLKF_USERMODE(cf)	(((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/*
 * CLKF_INTR: True if we took the interrupt from inside another
 * interrupt handler.
 */
#if !defined(__ARM_EABI__)
/* Hack to treat FPE time as interrupt time so we can measure it */
#define	CLKF_INTR(cf)						\
	((curcpu()->ci_intr_depth > 1) ||			\
	    ((cf)->cf_tf.tf_spsr & PSR_MODE) == PSR_UND32_MODE)
#else
#define	CLKF_INTR(cf)	((void)(cf), curcpu()->ci_intr_depth > 1)
#endif
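
/*
 * Illustrative sketch only, not part of this header: the MI clock code
 * (hardclock()/statclock()) consumes these macros roughly like the
 * hypothetical handler below, to decide where a tick is accounted.
 *
 *	void
 *	clock_tick(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf)) {
 *			// charge the tick to the current LWP's user time
 *		} else if (CLKF_INTR(cf)) {
 *			// charge the tick as interrupt time
 *		} else {
 *			// charge the tick as system time
 *		}
 *	}
 */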

/*
 * CLKF_PC: Extract the program counter from a clockframe
 */
#define	CLKF_PC(frame)		(frame->cf_tf.tf_pc)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define	LWP_PC(l)		(lwp_trapframe(l)->tf_pc)

/*
 * Per-CPU information.  For now we assume one CPU.
 */
#ifdef _KERNEL
static inline int curcpl(void);
static inline void set_curcpl(int);
static inline void cpu_dosoftints(void);
#endif

#include <sys/param.h>

#ifdef _KMEMUSER
#include <sys/intr.h>
#endif
#include <sys/atomic.h>
#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>

/*
 * Cache info variables.
 */
#define	CACHE_TYPE_VIVT		0
#define	CACHE_TYPE_xxPT		1
#define	CACHE_TYPE_VIPT		1
#define	CACHE_TYPE_PIxx		2
#define	CACHE_TYPE_PIPT		3

/* PRIMARY CACHE VARIABLES */
struct arm_cache_info {
	u_int icache_size;
	u_int icache_line_size;
	u_int icache_ways;
	u_int icache_way_size;
	u_int icache_sets;

	u_int dcache_size;
	u_int dcache_line_size;
	u_int dcache_ways;
	u_int dcache_way_size;
	u_int dcache_sets;

	uint8_t cache_type;
	bool cache_unified;
	uint8_t icache_type;
	uint8_t dcache_type;
};
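
/*
 * For a set-associative cache the geometry fields above are related by
 *
 *	way_size = size / ways
 *	sets     = way_size / line_size
 *
 * e.g. a hypothetical 32KB 4-way D-cache with 64-byte lines has
 * dcache_way_size = 32768 / 4 = 8192 and dcache_sets = 8192 / 64 = 128.
 */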

struct cpu_info {
	struct cpu_data	ci_data;	/* MI per-cpu data */
	device_t	ci_dev;		/* Device corresponding to this CPU */
	cpuid_t		ci_cpuid;
	uint32_t	ci_arm_cpuid;	/* aggregate CPU id */
	uint32_t	ci_arm_cputype;	/* CPU type */
	uint32_t	ci_arm_cpurev;	/* CPU revision */
	uint32_t	ci_ctrl;	/* The CPU control register */

	/*
	 * the following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int		ci_want_resched __aligned(COHERENCY_UNIT);
					/* resched() was called */
	lwp_t *		ci_curlwp __aligned(COHERENCY_UNIT);
					/* current lwp */
	lwp_t *		ci_onproc;	/* current user LWP / kthread */

	/*
	 * largely CPU-private.
	 */
	lwp_t *		ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	struct cpu_softc *
			ci_softc;	/* platform softc */

	int		ci_cpl;		/* current processor level (spl) */
	volatile int	ci_hwpl;	/* current hardware priority */
	int		ci_kfpu_spl;

	volatile u_int	ci_intr_depth;	/* interrupt depth */
	volatile u_int	ci_softints;
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	lwp_t *		ci_lastlwp;	/* last lwp */

	struct evcnt	ci_arm700bugcount;
	int32_t		ci_mtx_count;
	int		ci_mtx_oldspl;
	register_t	ci_undefsave[3];
	uint32_t	ci_vfp_id;
	uint64_t	ci_lastintr;

	struct pmap *	ci_pmap_lastuser;
	struct pmap *	ci_pmap_cur;
	tlb_asid_t	ci_pmap_asid_cur;

	struct trapframe *
			ci_ddb_regs;

	struct evcnt	ci_abt_evs[16];
	struct evcnt	ci_und_ev;
	struct evcnt	ci_und_cp15_ev;
	struct evcnt	ci_vfp_evs[3];

	uint32_t	ci_midr;
	uint32_t	ci_actlr;
	uint32_t	ci_revidr;
	uint32_t	ci_mpidr;
	uint32_t	ci_mvfr[2];

	uint32_t	ci_capacity_dmips_mhz;

	struct arm_cache_info
			ci_cacheinfo;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

extern struct cpu_info cpu_info_store[];

struct lwp *arm_curlwp(void);
struct cpu_info *arm_curcpu(void);

#ifdef _KERNEL
#if defined(_MODULE)

#define	curlwp		arm_curlwp()
#define	curcpu()	arm_curcpu()

#elif defined(TPIDRPRW_IS_CURLWP)
static inline struct lwp *
_curlwp(void)
{
	return (struct lwp *) armreg_tpidrprw_read();
}

static inline void
_curlwp_set(struct lwp *l)
{
	armreg_tpidrprw_write((uintptr_t)l);
}

// Also in <sys/lwp.h> but also here if this was included before <sys/lwp.h>
static inline struct cpu_info *lwp_getcpu(struct lwp *);

#define	curlwp		_curlwp()
// curcpu() expands into two instructions: a mrc and a ldr
#define	curcpu()	lwp_getcpu(_curlwp())
#elif defined(TPIDRPRW_IS_CURCPU)
#ifdef __HAVE_PREEMPTION
#error __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif
static inline struct cpu_info *
curcpu(void)
{
	return (struct cpu_info *) armreg_tpidrprw_read();
}
#elif !defined(MULTIPROCESSOR)
#define	curcpu()	(&cpu_info_store[0])
#elif !defined(__HAVE_PREEMPTION)
#error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP
#else
#error MULTIPROCESSOR && __HAVE_PREEMPTION requires TPIDRPRW_IS_CURLWP
#endif /* !TPIDRPRW_IS_CURCPU && !TPIDRPRW_IS_CURLWP */
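
/*
 * Rough sketch (an assumption, not captured compiler output) of what
 * curcpu() compiles to in the TPIDRPRW_IS_CURLWP case; register choice
 * is up to the compiler and the l_cpu offset is symbolic:
 *
 *	mrc	p15, 0, r0, c13, c0, 4	@ r0 = TPIDRPRW, i.e. curlwp
 *	ldr	r0, [r0, #offsetof(struct lwp, l_cpu)]
 *
 * i.e. the "a mrc and a ldr" noted in the comment above.
 */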

#ifndef curlwp
#define	curlwp		(curcpu()->ci_curlwp)
#endif
#define	curpcb		((struct pcb *)lwp_getpcb(curlwp))

#define	CPU_INFO_ITERATOR	int
#if defined(_MODULE) || defined(MULTIPROCESSOR)
extern struct cpu_info *cpu_info[];
#define	cpu_number()		(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define	CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = cpu_info[0]; cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL; cii++
#else
#define	cpu_number()		0

#define	CPU_IS_PRIMARY(ci)	true
#define	CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif

#define	LWP0_CPU_INFO	(&cpu_info_store[0])

static inline int
curcpl(void)
{
	return curcpu()->ci_cpl;
}

static inline void
set_curcpl(int pri)
{
	curcpu()->ci_cpl = pri;
}

static inline void
cpu_dosoftints(void)
{
#ifdef __HAVE_FAST_SOFTINTS
	void	dosoftints(void);
#ifndef __HAVE_PIC_FAST_SOFTINTS
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0)
		dosoftints();
#endif
#endif
}

/*
 * Scheduling glue
 */
void	cpu_signotify(struct lwp *);
#define	setsoftast(ci)		(cpu_signotify((ci)->ci_onproc))

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the i386, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, \
				 setsoftast(lwp_getcpu(l)))

/*
 * We've already preallocated the stack for the idlelwps for additional CPUs.
 * This hook allows them to be returned.
 */
vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *);

#ifdef _ARM_ARCH_6
int	cpu_maxproc_hook(int);
#endif

#endif /* _KERNEL */

#endif /* !_LOCORE */

#endif /* _KERNEL || _KMEMUSER */

#elif defined(__aarch64__)

#include <aarch64/cpu.h>

#endif /* __arm__/__aarch64__ */

#endif /* !_ARM_CPU_H_ */