/*	$NetBSD: cpu.h,v 1.53 2024/12/30 19:17:21 jmcneill Exp $	*/

/*-
 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AARCH64_CPU_H_
#define _AARCH64_CPU_H_

#include <arm/cpu.h>

#ifdef __aarch64__

#ifdef _KERNEL_OPT
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#endif

#include <sys/param.h>

#if defined(_KERNEL) || defined(_KMEMUSER)
#include <sys/evcnt.h>

#include <aarch64/armreg.h>
#include <aarch64/frame.h>

struct clockframe {
	struct trapframe cf_tf;
};

/* user mode iff (spsr & 0x0f) == SPSR_M_EL0T (64-bit, 0) or USER (32-bit, 0) */
#define CLKF_USERMODE(cf)	((((cf)->cf_tf.tf_spsr) & 0x0f) == 0)
#define CLKF_PC(cf)		((cf)->cf_tf.tf_pc)
#define CLKF_INTR(cf)		((void)(cf), curcpu()->ci_intr_depth > 1)

/*
 * LWP_PC: Find out the program counter for the given lwp.
 */
#define LWP_PC(l)		((l)->l_md.md_utf->tf_pc)
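
/*
 * Illustrative sketch only (not part of this header's API): how an MD
 * clock interrupt handler might consume the CLKF_* macros above.  The
 * handler name cpu_clock_intr() and the call sequence are hypothetical.
 *
 *	void
 *	cpu_clock_intr(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf)) {
 *			// the tick was taken while running in user mode;
 *			// charge user time, sample CLKF_PC(cf) for profiling
 *		}
 *		if (CLKF_INTR(cf)) {
 *			// the tick interrupted another interrupt handler
 *			// (ci_intr_depth > 1); charge interrupt time
 *		}
 *	}
 */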

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/intr.h>

struct aarch64_cpufuncs {
	void (*cf_set_ttbr0)(uint64_t);
	void (*cf_icache_sync_range)(vaddr_t, vsize_t);
};

#define MAX_CACHE_LEVEL	8	/* ARMv8 supports up to 8 levels of cache */

struct aarch64_cache_unit {
	u_int cache_type;
#define CACHE_TYPE_VPIPT	0	/* VMID-aware PIPT */
#define CACHE_TYPE_VIVT		1	/* ASID-tagged VIVT */
#define CACHE_TYPE_VIPT		2
#define CACHE_TYPE_PIPT		3
	u_int cache_line_size;
	u_int cache_ways;
	u_int cache_sets;
	u_int cache_way_size;
	u_int cache_size;
};

struct aarch64_cache_info {
	u_int cacheable;
#define CACHE_CACHEABLE_NONE	0
#define CACHE_CACHEABLE_ICACHE	1	/* instruction cache only */
#define CACHE_CACHEABLE_DCACHE	2	/* data cache only */
#define CACHE_CACHEABLE_IDCACHE	3	/* instruction and data caches */
#define CACHE_CACHEABLE_UNIFIED	4	/* unified cache */
	struct aarch64_cache_unit icache;
	struct aarch64_cache_unit dcache;
};

struct aarch64_low_power_idle {
	uint32_t min_res;		/* minimum residency */
	uint32_t wakeup_latency;	/* worst case */
	uint32_t save_restore_flags;
#define LPI_SAVE_RESTORE_CORE	__BIT(0)
#define LPI_SAVE_RESTORE_TRACE	__BIT(1)
#define LPI_SAVE_RESTORE_GICR	__BIT(2)
#define LPI_SAVE_RESTORE_GICD	__BIT(3)
	uint32_t reg_addr;
#define LPI_REG_ADDR_WFI	0xffffffff

	char *name;
	struct evcnt events;
};
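
/*
 * Illustrative sketch only: the aarch64_cache_unit geometry fields are
 * related as follows (this mirrors how such fields are typically derived
 * from CCSIDR_EL1; the actual derivation lives in the cache-probing code,
 * not in this header):
 *
 *	cache_way_size = cache_line_size * cache_sets;
 *	cache_size     = cache_way_size * cache_ways;
 *
 * e.g. a 4-way, 64-byte-line, 128-set L1 D-cache has an 8KB way size and
 * a 32KB total size.
 */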

struct cpu_info {
	struct cpu_data ci_data;
	device_t ci_dev;
	cpuid_t ci_cpuid;

	/*
	 * The following are in their own cache line, as they are stored to
	 * regularly by remote CPUs; when they were mixed with other fields
	 * we observed frequent cache misses.
	 */
	int ci_want_resched __aligned(COHERENCY_UNIT);
	/* XXX pending IPIs? */

	/*
	 * This is stored frequently, and is fetched by remote CPUs.
	 */
	struct lwp *ci_curlwp __aligned(COHERENCY_UNIT);
	struct lwp *ci_onproc;

	/*
	 * Largely CPU-private.
	 */
	struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);

	uint64_t ci_lastintr;

	int ci_mtx_oldspl;
	int ci_mtx_count;

	int ci_cpl;		/* current processor level (spl) */
	volatile int ci_hwpl;	/* current hardware priority */
	volatile u_int ci_softints;
	volatile u_int ci_intr_depth;
	volatile uint32_t ci_blocked_pics;
	volatile uint32_t ci_pending_pics;
	volatile uint32_t ci_pending_ipls;

	int ci_kfpu_spl;

#if defined(PMAP_MI)
	struct pmap_tlb_info *ci_tlb_info;
	struct pmap *ci_pmap_lastuser;
	struct pmap *ci_pmap_cur;
#endif

	/* ASID of current pmap */
	tlb_asid_t ci_pmap_asid_cur;

	/* event counters */
	struct evcnt ci_vfp_use;
	struct evcnt ci_vfp_reuse;
	struct evcnt ci_vfp_save;
	struct evcnt ci_vfp_release;
	struct evcnt ci_uct_trap;
	struct evcnt ci_intr_preempt;
	struct evcnt ci_rndrrs_fail;

	/* FDT- or similar-supplied "cpu capacity" */
	uint32_t ci_capacity_dmips_mhz;

	/* interrupt controller */
	u_int ci_gic_redist;	/* GICv3 redistributor index */
	uint64_t ci_gic_sgir;	/* GICv3 SGIR target */

	/* ACPI */
	uint32_t ci_acpiid;	/* ACPI Processor Unique ID */

	/* ACPI low power idle */
	uint32_t ci_nlpi;
	struct aarch64_low_power_idle *ci_lpi;
	uint64_t ci_last_idle;

	/* cached system registers */
	uint64_t ci_sctlr_el1;
	uint64_t ci_sctlr_el2;

	/* sysctl(9) exposed system registers */
	struct aarch64_sysctl_cpu_id ci_id;
#define ci_midr		ci_id.ac_midr

	/* cache information and function pointers */
	struct aarch64_cache_info ci_cacheinfo[MAX_CACHE_LEVEL];
	struct aarch64_cpufuncs ci_cpufuncs;

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
} __aligned(COHERENCY_UNIT);

#ifdef _KERNEL
static inline __always_inline struct lwp * __attribute__ ((const))
aarch64_curlwp(void)
{
	struct lwp *l;
	/* the current lwp is kept in the TPIDR_EL1 system register */
	__asm("mrs %0, tpidr_el1" : "=r"(l));
	return l;
}

/* forward declaration; defined in sys/lwp.h */
static __inline struct cpu_info *lwp_getcpu(struct lwp *);

#define curcpu()	(lwp_getcpu(aarch64_curlwp()))
#define setsoftast(ci)	(cpu_signotify((ci)->ci_onproc))
#undef curlwp
#define curlwp		(aarch64_curlwp())
#define curpcb		((struct pcb *)lwp_getpcb(curlwp))

void cpu_signotify(struct lwp *l);
void cpu_need_proftick(struct lwp *l);

void cpu_hatch(struct cpu_info *);

extern struct cpu_info *cpu_info[];
extern struct cpu_info cpu_info_store[];

#define CPU_INFO_ITERATOR	int
#if defined(MULTIPROCESSOR) || defined(_MODULE)
#define cpu_number()		(curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)	((ci)->ci_index == 0)
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = cpu_info[0];					\
	cii < (ncpu ? ncpu : 1) && (ci = cpu_info[cii]) != NULL;	\
	cii++
#else /* MULTIPROCESSOR */
#define cpu_number()		0
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, __USE(cii), ci = curcpu(); ci != NULL; ci = NULL
#endif /* MULTIPROCESSOR */
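
/*
 * Illustrative sketch only: CPU_INFO_ITERATOR/CPU_INFO_FOREACH supply the
 * three clauses of a for loop, in the usual MI idiom:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		// visit each attached CPU via ci
 *	}
 *
 * On non-MULTIPROCESSOR kernels this degenerates to a single pass over
 * curcpu().
 */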

#define LWP0_CPU_INFO	(&cpu_info_store[0])

#define __HAVE_CPU_DOSOFTINTS_CI

static inline void
cpu_dosoftints_ci(struct cpu_info *ci)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	void dosoftints(void);

	if (ci->ci_intr_depth == 0 && (ci->ci_softints >> ci->ci_cpl) > 0) {
		dosoftints();
	}
#endif
}

static inline void
cpu_dosoftints(void)
{
#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
	KDASSERT(kpreempt_disabled());
	cpu_dosoftints_ci(curcpu());
#endif
}

struct cpufeature_attach_args {
	struct cpu_info *ci;
};

#endif /* _KERNEL */

#endif /* _KERNEL || _KMEMUSER */

#endif /* __aarch64__ */

#endif /* _AARCH64_CPU_H_ */