/*	$NetBSD: cpu.h,v 1.16 2024/08/04 08:16:25 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_CPU_H_
#define _RISCV_CPU_H_

#if defined(_KERNEL) || defined(_KMEMUSER)

struct clockframe {
	vaddr_t cf_epc;
	register_t cf_status;
	int cf_intr_depth;
};

#define CLKF_USERMODE(cf)	(((cf)->cf_status & SR_SPP) == 0)
#define CLKF_PC(cf)		((cf)->cf_epc)
#define CLKF_INTR(cf)		((cf)->cf_intr_depth > 1)
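
/*
 * Illustrative sketch only (not part of this header): MI clock handling
 * such as hardclock(9) receives a struct clockframe and classifies the
 * interrupted context with the macros above.  SR_SPP clear in the saved
 * status means the trap was taken from user mode.
 */
#if 0
static void
example_clock_handler(struct clockframe *cf)
{
	if (CLKF_USERMODE(cf)) {
		/* interrupted user code; its PC is CLKF_PC(cf) */
	} else if (CLKF_INTR(cf)) {
		/* the clock interrupt nested inside another interrupt */
	}
}
#endif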

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>
#include <sys/intr.h>

struct cpu_info {
	struct cpu_data ci_data;
	device_t ci_dev;
	cpuid_t ci_cpuid;
	struct lwp *ci_curlwp;
	struct lwp *ci_onproc;		/* current user LWP / kthread */
	struct lwp *ci_softlwps[SOFTINT_COUNT];
	struct trapframe *ci_ddb_regs;

	uint64_t ci_lastintr;
	uint64_t ci_lastintr_scheduled;
	struct evcnt ci_ev_timer;
	struct evcnt ci_ev_timer_missed;

	u_long ci_cpu_freq;		/* CPU frequency */
	int ci_mtx_oldspl;
	int ci_mtx_count;
	int ci_cpl;
	volatile u_int ci_intr_depth;

	int ci_want_resched __aligned(COHERENCY_UNIT);
	u_int ci_softints;

	tlb_asid_t ci_pmap_asid_cur;

	union pmap_segtab *ci_pmap_user_segtab;
#ifdef _LP64
	union pmap_segtab *ci_pmap_user_seg0tab;
#endif

	struct evcnt ci_ev_fpu_saves;
	struct evcnt ci_ev_fpu_loads;
	struct evcnt ci_ev_fpu_reenables;

	struct pmap_tlb_info *ci_tlb_info;

#ifdef MULTIPROCESSOR
	volatile u_long ci_flags;
#define	CPUF_PRIMARY	__BIT(0)	/* CPU is primary CPU */
#define	CPUF_PRESENT	__BIT(1)	/* CPU is present */
#define	CPUF_RUNNING	__BIT(2)	/* CPU is running */
#define	CPUF_PAUSED	__BIT(3)	/* CPU is paused */

	void *ci_intcsoftc;
	volatile u_long ci_request_ipis;	/* bitmask of IPIs requested */
	u_long ci_active_ipis;			/* bitmask of IPIs being serviced */

	struct evcnt ci_evcnt_all_ipis;		/* aggregated IPI counter */
	struct evcnt ci_evcnt_per_ipi[NIPIS];	/* individual IPI counters */
	struct evcnt ci_evcnt_synci_onproc_rqst;
	struct evcnt ci_evcnt_synci_deferred_rqst;
	struct evcnt ci_evcnt_synci_ipi_rqst;

	kcpuset_t *ci_shootdowncpus;
	kcpuset_t *ci_multicastcpus;
	kcpuset_t *ci_watchcpus;
	kcpuset_t *ci_ddbcpus;
#endif

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

extern struct cpu_info cpu_info_store[];
extern cpuid_t cpu_bphartid;
extern u_int cpu_hartindex[];

#ifdef MULTIPROCESSOR

void	cpu_hatch(struct cpu_info *, unsigned long);

void	cpu_init_secondary_processor(u_int);
void	cpu_boot_secondary_processors(void);
void	cpu_mpstart(void);
bool	cpu_hatched_p(u_int);

void	cpu_clr_mbox(u_int);
void	cpu_set_hatched(u_int);

void	cpu_halt(void);
void	cpu_halt_others(void);
bool	cpu_is_paused(cpuid_t);
void	cpu_pause(void);
void	cpu_pause_others(void);
void	cpu_resume(cpuid_t);
void	cpu_resume_others(void);
void	cpu_debug_dump(void);

extern kcpuset_t *cpus_running;
extern kcpuset_t *cpus_hatched;
extern kcpuset_t *cpus_paused;
extern kcpuset_t *cpus_resumed;
extern kcpuset_t *cpus_halted;

/*
 * Definitions of cpu-dependent requirements referenced in generic code.
 */

/*
 * Send an inter-processor interrupt to every other CPU (excludes curcpu()).
 */
void	cpu_broadcast_ipi(int);

/*
 * Send an inter-processor interrupt to the CPUs in the kcpuset
 * (excludes curcpu()).
 */
void	cpu_multicast_ipi(const kcpuset_t *, int);

/*
 * Send an inter-processor interrupt to another CPU.
 */
int	cpu_send_ipi(struct cpu_info *, int);

#endif	/* MULTIPROCESSOR */
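
/*
 * Illustrative sketch only (assumed usage, not part of this header):
 * broadcasting reaches every CPU except the caller's, while a targeted
 * request goes through cpu_send_ipi().  IPI_NOP is a stand-in for
 * whichever IPI number the caller actually wants to raise.
 */
#if 0
static void
example_kick_cpus(struct cpu_info *target_ci)
{
	cpu_broadcast_ipi(IPI_NOP);		/* every CPU except curcpu() */
	(void)cpu_send_ipi(target_ci, IPI_NOP);	/* one specific CPU */
}
#endif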

struct lwp;
static inline struct cpu_info *lwp_getcpu(struct lwp *);

register struct lwp *riscv_curlwp __asm("tp");
#define	curlwp		riscv_curlwp
#define	curcpu()	lwp_getcpu(curlwp)
#define	curpcb		((struct pcb *)lwp_getpcb(curlwp))

static inline cpuid_t
cpu_number(void)
{
#ifdef MULTIPROCESSOR
	return curcpu()->ci_cpuid;
#else
	return 0;
#endif
}

void	cpu_proc_fork(struct proc *, struct proc *);
void	cpu_signotify(struct lwp *);
void	cpu_need_proftick(struct lwp *);
void	cpu_boot_secondary_processors(void);

#define CPU_INFO_ITERATOR	cpuid_t
#ifdef MULTIPROCESSOR
#define CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#define CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = &cpu_info_store[0];				\
	ci != NULL;							\
	cii++, ncpu ? (ci = cpu_infos[cii])				\
		    : (ci = NULL)
#else
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci)					\
	(cii) = 0, (ci) = curcpu(); (cii) == 0; (cii)++
#endif
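
/*
 * Illustrative sketch only (assumed usage, not part of this header):
 * the iterator pair above is normally used as the header of a for loop.
 */
#if 0
static void
example_walk_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;	/* skip the primary CPU */
		/* per-CPU work on ci goes here */
	}
}
#endif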

#define CPU_INFO_CURPMAP(ci)	(curlwp->l_proc->p_vmspace->vm_map.pmap)

static inline void
cpu_dosoftints(void)
{
	extern void dosoftints(void);
	struct cpu_info * const ci = curcpu();

	/*
	 * Only run soft interrupts from the base (non-interrupt) level,
	 * and only if any are pending at or above the current ci_cpl.
	 */
	if (ci->ci_intr_depth == 0
	    && (ci->ci_data.cpu_softints >> ci->ci_cpl) > 0)
		dosoftints();
}

static inline bool
cpu_intr_p(void)
{
	return curcpu()->ci_intr_depth > 0;
}

#define LWP_PC(l)	cpu_lwp_pc(l)

vaddr_t	cpu_lwp_pc(struct lwp *);

static inline void
cpu_idle(void)
{
	/* Wait for an interrupt. */
	asm volatile("wfi" ::: "memory");
}

#endif /* _KERNEL */

#endif /* _RISCV_CPU_H_ */