/*	$NetBSD: cpu.h,v 1.14 2023/09/03 08:48:20 skrll Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_CPU_H_
#define _RISCV_CPU_H_

#if defined(_KERNEL) || defined(_KMEMUSER)

struct clockframe {
	vaddr_t		cf_epc;
	register_t	cf_status;
	int		cf_intr_depth;
};

#define	CLKF_USERMODE(cf)	(((cf)->cf_status & SR_SPP) == 0)
#define	CLKF_PC(cf)		((cf)->cf_epc)
#define	CLKF_INTR(cf)		((cf)->cf_intr_depth > 1)
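
/*
 * The MI clock interrupt code uses these to classify the interrupted
 * context: SR_SPP is clear in sstatus when the trap came from user mode,
 * cf_epc holds the interrupted PC, and cf_intr_depth > 1 means the tick
 * arrived while another interrupt was already being serviced.  A minimal
 * sketch of a consumer (hypothetical, not part of this header):
 *
 *	void
 *	example_clock_tick(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf))
 *			account_user_tick(curlwp);	(hypothetical helper)
 *		if (CLKF_INTR(cf))
 *			account_intr_tick(curcpu());	(hypothetical helper)
 *		last_pc = CLKF_PC(cf);			(hypothetical variable)
 *	}
 */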

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>
#include <sys/intr.h>

struct cpu_info {
	struct cpu_data		ci_data;
	device_t		ci_dev;
	cpuid_t			ci_cpuid;
	struct lwp		*ci_curlwp;	/* current LWP */
	struct lwp		*ci_onproc;	/* current user LWP / kthread */
	struct lwp		*ci_softlwps[SOFTINT_COUNT];
	struct trapframe	*ci_ddb_regs;

	uint64_t		ci_lastintr;
	uint64_t		ci_lastintr_scheduled;
	struct evcnt		ci_ev_timer;
	struct evcnt		ci_ev_timer_missed;

	u_long			ci_cpu_freq;	/* CPU frequency */
	int			ci_mtx_oldspl;
	int			ci_mtx_count;
	int			ci_cpl;		/* current priority (spl) level */
	volatile u_int		ci_intr_depth;	/* hardware interrupt nesting depth */

	int			ci_want_resched __aligned(COHERENCY_UNIT);
	u_int			ci_softints;	/* pending soft interrupts */

	tlb_asid_t		ci_pmap_asid_cur;	/* current pmap ASID */

	union pmap_segtab	*ci_pmap_user_segtab;
#ifdef _LP64
	union pmap_segtab	*ci_pmap_user_seg0tab;
#endif

	struct evcnt		ci_ev_fpu_saves;
	struct evcnt		ci_ev_fpu_loads;
	struct evcnt		ci_ev_fpu_reenables;

	struct pmap_tlb_info	*ci_tlb_info;

#ifdef MULTIPROCESSOR
	volatile u_long		ci_flags;
#define	CPUF_PRIMARY	__BIT(0)	/* CPU is primary CPU */
#define	CPUF_PRESENT	__BIT(1)	/* CPU is present */
#define	CPUF_RUNNING	__BIT(2)	/* CPU is running */
#define	CPUF_PAUSED	__BIT(3)	/* CPU is paused */

	volatile u_long		ci_request_ipis;
					/* bitmask of IPIs requested */
	u_long			ci_active_ipis;	/* bitmask of IPIs being serviced */

	struct evcnt		ci_evcnt_all_ipis;	/* aggregated IPI counter */
	struct evcnt		ci_evcnt_per_ipi[NIPIS];	/* individual IPI counters */
	struct evcnt		ci_evcnt_synci_onproc_rqst;
	struct evcnt		ci_evcnt_synci_deferred_rqst;
	struct evcnt		ci_evcnt_synci_ipi_rqst;

	kcpuset_t		*ci_shootdowncpus;
	kcpuset_t		*ci_multicastcpus;
	kcpuset_t		*ci_watchcpus;
	kcpuset_t		*ci_ddbcpus;
#endif /* MULTIPROCESSOR */

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam	*ci_gmon;	/* MI per-cpu GPROF */
#endif
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

extern struct cpu_info cpu_info_store[];
extern cpuid_t cpu_bphartid;		/* hart ID of the boot processor */
extern u_int cpu_hartindex[];		/* hart ID -> cpu index map */

#ifdef MULTIPROCESSOR

void	cpu_hatch(struct cpu_info *, unsigned long);

void	cpu_init_secondary_processor(u_int);
void	cpu_boot_secondary_processors(void);
void	cpu_mpstart(void);
bool	cpu_hatched_p(u_int);

void	cpu_clr_mbox(u_int);
void	cpu_set_hatched(u_int);


void	cpu_halt(void);
void	cpu_halt_others(void);
bool	cpu_is_paused(cpuid_t);
void	cpu_pause(void);
void	cpu_pause_others(void);
void	cpu_resume(cpuid_t);
void	cpu_resume_others(void);
void	cpu_debug_dump(void);

extern kcpuset_t *cpus_running;
extern kcpuset_t *cpus_hatched;
extern kcpuset_t *cpus_paused;
extern kcpuset_t *cpus_resumed;
extern kcpuset_t *cpus_halted;

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */

/*
 * Send an inter-processor interrupt to each other CPU (excludes curcpu())
 */
void	cpu_broadcast_ipi(int);

/*
 * Send an inter-processor interrupt to CPUs in kcpuset (excludes curcpu())
 */
void	cpu_multicast_ipi(const kcpuset_t *, int);

/*
 * Send an inter-processor interrupt to another CPU.
 */
int	cpu_send_ipi(struct cpu_info *, int);

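/*
 * A rough sketch of how these are typically driven (hypothetical call
 * sites; "ipi_type" stands for one of the IPI numbers counted by NIPIS):
 *
 *	cpu_broadcast_ipi(ipi_type);			    all other CPUs
 *	cpu_multicast_ipi(ci->ci_shootdowncpus, ipi_type);  a chosen subset
 *	cpu_send_ipi(target_ci, ipi_type);		    a single CPU
 */
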
#endif /* MULTIPROCESSOR */

struct lwp;
static inline struct cpu_info *lwp_getcpu(struct lwp *);

register struct lwp *riscv_curlwp __asm("tp");
#define	curlwp		riscv_curlwp
#define	curcpu()	lwp_getcpu(curlwp)
#define	curpcb		((struct pcb *)lwp_getpcb(curlwp))
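
/*
 * curlwp lives in the RISC-V "tp" (thread pointer) register, so fetching it
 * is a single register read.  curcpu() is derived from the current LWP via
 * lwp_getcpu(), which returns the LWP's l_cpu (see <sys/lwp.h>).
 */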

static inline cpuid_t
cpu_number(void)
{
#ifdef MULTIPROCESSOR
	return curcpu()->ci_cpuid;
#else
	return 0;
#endif
}

void	cpu_proc_fork(struct proc *, struct proc *);
void	cpu_signotify(struct lwp *);
void	cpu_need_proftick(struct lwp *l);
void	cpu_boot_secondary_processors(void);

#define	CPU_INFO_ITERATOR	cpuid_t
#ifdef MULTIPROCESSOR
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#define	CPU_INFO_FOREACH(cii, ci)					\
	cii = 0, ci = &cpu_info_store[0];				\
	ci != NULL;							\
	cii++, ncpu ? (ci = cpu_infos[cii])				\
		    : (ci = NULL)
#else
#define	CPU_IS_PRIMARY(ci)	true
#define	CPU_INFO_FOREACH(cii, ci)					\
	(cii) = 0, (ci) = curcpu(); (cii) == 0; (cii)++
#endif
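
/*
 * MI code iterates over the CPUs with the usual idiom:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		... per-CPU work ...
 *	}
 */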

#define	CPU_INFO_CURPMAP(ci)	(curlwp->l_proc->p_vmspace->vm_map.pmap)

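/*
 * Run any deferred soft interrupts, but only when no hardware interrupt is
 * being serviced and the pending-softint bits not masked by the current
 * priority level (ci_cpl) are non-zero.
 */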
static inline void
cpu_dosoftints(void)
{
	extern void dosoftints(void);
	struct cpu_info * const ci = curcpu();
	if (ci->ci_intr_depth == 0
	    && (ci->ci_data.cpu_softints >> ci->ci_cpl) > 0)
		dosoftints();
}

static inline bool
cpu_intr_p(void)
{
	return curcpu()->ci_intr_depth > 0;
}

#define	LWP_PC(l)	cpu_lwp_pc(l)

vaddr_t	cpu_lwp_pc(struct lwp *);

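/*
 * Idle loop body: "wfi" (Wait For Interrupt) stalls the hart until an
 * interrupt becomes pending.
 */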
static inline void
cpu_idle(void)
{
	asm volatile("wfi" ::: "memory");
}

#endif /* _KERNEL */

#endif /* _RISCV_CPU_H_ */