/*	$NetBSD: cpu.h,v 1.13 2023/07/29 06:59:47 skrll Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31 1.1 matt
32 1.1 matt #ifndef _RISCV_CPU_H_
33 1.1 matt #define _RISCV_CPU_H_
34 1.1 matt
35 1.1 matt #if defined(_KERNEL) || defined(_KMEMUSER)
36 1.4 matt
/*
 * Snapshot of the interrupted context taken at a clock interrupt,
 * consumed by the MI hardclock()/statclock() code through the CLKF_*
 * accessor macros below.
 */
struct clockframe {
	vaddr_t cf_epc;		/* exception PC of the interrupted context */
	register_t cf_status;	/* saved status register (SR_SPP examined) */
	int cf_intr_depth;	/* interrupt nesting level at the tick */
};

/*
 * CLKF_USERMODE: true if the tick interrupted user mode — the SPP bit
 * being clear means the trap did not come from supervisor mode.
 * CLKF_PC: program counter of the interrupted context.
 * CLKF_INTR: true if the tick interrupted another interrupt handler;
 * depth > 1 because the clock interrupt itself accounts for one level.
 */
#define CLKF_USERMODE(cf)	(((cf)->cf_status & SR_SPP) == 0)
#define CLKF_PC(cf)		((cf)->cf_epc)
#define CLKF_INTR(cf)		((cf)->cf_intr_depth > 1)
46 1.1 matt
47 1.1 matt #include <sys/cpu_data.h>
48 1.1 matt #include <sys/device_if.h>
49 1.1 matt #include <sys/evcnt.h>
50 1.1 matt #include <sys/intr.h>
51 1.1 matt
/*
 * Per-CPU state.  One instance exists for each CPU the kernel manages;
 * curcpu() (defined later in this file) returns the instance belonging
 * to the executing CPU.  Field layout is ABI: do not reorder.
 */
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-CPU data */
	device_t ci_dev;		/* associated cpu autoconf device */
	cpuid_t ci_cpuid;		/* CPU identifier (see cpu_number()) */
	struct lwp *ci_curlwp;		/* LWP running on this CPU */
	struct lwp *ci_onproc;		/* current user LWP / kthread */
	struct lwp *ci_softlwps[SOFTINT_COUNT]; /* softint handler LWPs */
	struct trapframe *ci_ddb_regs;	/* register snapshot for ddb */

	uint64_t ci_lastintr;		/* timestamp of last timer intr */
	uint64_t ci_lastintr_scheduled;	/* when that intr was due */
	struct evcnt ci_ev_timer;	/* timer interrupt count */
	struct evcnt ci_ev_timer_missed; /* late timer interrupts */

	u_long ci_cpu_freq;		/* CPU frequency */
	int ci_mtx_oldspl;		/* spl saved by spin-mutex code */
	int ci_mtx_count;		/* spin-mutex bookkeeping */
	int ci_cpl;			/* current priority (spl) level */
	volatile u_int ci_intr_depth;	/* interrupt nesting level */

	/* own cache line: may be written from other CPUs */
	int ci_want_resched __aligned(COHERENCY_UNIT);
	u_int ci_softints;		/* pending soft interrupts */

	tlb_asid_t ci_pmap_asid_cur;	/* ASID currently active */

	union pmap_segtab *ci_pmap_user_segtab;
#ifdef _LP64
	union pmap_segtab *ci_pmap_user_seg0tab;
#endif

	struct evcnt ci_ev_fpu_saves;	/* FPU context saves */
	struct evcnt ci_ev_fpu_loads;	/* FPU context loads */
	struct evcnt ci_ev_fpu_reenables; /* FPU lazy re-enables */

	struct pmap_tlb_info *ci_tlb_info; /* TLB/ASID management state */

#ifdef MULTIPROCESSOR
	volatile u_long ci_flags;	/* CPUF_* state bits below */
#define CPUF_PRIMARY	__BIT(0)	/* CPU is primary CPU */
#define CPUF_PRESENT	__BIT(1)	/* CPU is present */
#define CPUF_RUNNING	__BIT(2)	/* CPU is running */
#define CPUF_PAUSED	__BIT(3)	/* CPU is paused */
#define CPUF_USERPMAP	__BIT(4)	/* CPU has a user pmap activated */

	volatile u_long ci_request_ipis;
					/* bitmask of IPIs requested */
	u_long ci_active_ipis;		/* bitmask of IPIs being serviced */

	struct evcnt ci_evcnt_all_ipis;	/* aggregated IPI counter */
	struct evcnt ci_evcnt_per_ipi[NIPIS]; /* individual IPI counters */
	struct evcnt ci_evcnt_synci_onproc_rqst;
	struct evcnt ci_evcnt_synci_deferred_rqst;
	struct evcnt ci_evcnt_synci_ipi_rqst;

	/* per-CPU scratch cpusets for the cross-call paths */
	kcpuset_t *ci_shootdowncpus;
	kcpuset_t *ci_multicastcpus;
	kcpuset_t *ci_watchcpus;
	kcpuset_t *ci_ddbcpus;
#endif

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};
116 1.1 matt
117 1.4 matt #endif /* _KERNEL || _KMEMUSER */
118 1.4 matt
119 1.4 matt #ifdef _KERNEL
120 1.4 matt
121 1.12 skrll extern struct cpu_info *cpu_info[];
122 1.10 skrll extern struct cpu_info cpu_info_store[];
123 1.1 matt
124 1.12 skrll
125 1.12 skrll #ifdef MULTIPROCESSOR
126 1.12 skrll extern u_int riscv_cpu_max;
127 1.12 skrll extern cpuid_t cpu_hartid[];
128 1.12 skrll
129 1.12 skrll void cpu_hatch(struct cpu_info *);
130 1.12 skrll
131 1.12 skrll void cpu_init_secondary_processor(int);
132 1.12 skrll void cpu_boot_secondary_processors(void);
133 1.12 skrll void cpu_mpstart(void);
134 1.12 skrll bool cpu_hatched_p(u_int);
135 1.12 skrll
136 1.12 skrll void cpu_clr_mbox(int);
137 1.12 skrll void cpu_set_hatched(int);
138 1.12 skrll
139 1.12 skrll
140 1.12 skrll void cpu_halt(void);
141 1.12 skrll void cpu_halt_others(void);
142 1.12 skrll bool cpu_is_paused(cpuid_t);
143 1.12 skrll void cpu_pause(void);
144 1.12 skrll void cpu_pause_others(void);
145 1.12 skrll void cpu_resume(cpuid_t);
146 1.12 skrll void cpu_resume_others(void);
147 1.12 skrll void cpu_debug_dump(void);
148 1.12 skrll
149 1.12 skrll extern kcpuset_t *cpus_running;
150 1.12 skrll extern kcpuset_t *cpus_hatched;
151 1.12 skrll extern kcpuset_t *cpus_paused;
152 1.12 skrll extern kcpuset_t *cpus_resumed;
153 1.12 skrll extern kcpuset_t *cpus_halted;
154 1.12 skrll
155 1.12 skrll /*
156 1.12 skrll * definitions of cpu-dependent requirements
157 1.12 skrll * referenced in generic code
158 1.12 skrll */
159 1.12 skrll
160 1.12 skrll /*
161 1.12 skrll * Send an inter-processor interrupt to each other CPU (excludes curcpu())
162 1.12 skrll */
163 1.12 skrll void cpu_broadcast_ipi(int);
164 1.12 skrll
165 1.12 skrll /*
166 1.12 skrll * Send an inter-processor interrupt to CPUs in kcpuset (excludes curcpu())
167 1.12 skrll */
168 1.12 skrll void cpu_multicast_ipi(const kcpuset_t *, int);
169 1.12 skrll
170 1.12 skrll /*
171 1.12 skrll * Send an inter-processor interrupt to another CPU.
172 1.12 skrll */
173 1.12 skrll int cpu_send_ipi(struct cpu_info *, int);
174 1.12 skrll
175 1.12 skrll #endif
176 1.12 skrll
struct lwp;
static inline struct cpu_info *lwp_getcpu(struct lwp *);

/*
 * The kernel dedicates the RISC-V thread-pointer register (tp) to the
 * current LWP, so curlwp is a single register read and curcpu() is
 * derived from it via lwp_getcpu().
 */
register struct lwp *riscv_curlwp __asm("tp");
#define curlwp		riscv_curlwp
#define curcpu()	lwp_getcpu(curlwp)
#define curpcb		((struct pcb *)lwp_getpcb(curlwp))
184 1.1 matt
185 1.1 matt static inline cpuid_t
186 1.1 matt cpu_number(void)
187 1.1 matt {
188 1.1 matt #ifdef MULTIPROCESSOR
189 1.1 matt return curcpu()->ci_cpuid;
190 1.1 matt #else
191 1.1 matt return 0;
192 1.1 matt #endif
193 1.1 matt }
194 1.1 matt
195 1.1 matt void cpu_proc_fork(struct proc *, struct proc *);
196 1.1 matt void cpu_signotify(struct lwp *);
197 1.1 matt void cpu_need_proftick(struct lwp *l);
198 1.1 matt void cpu_boot_secondary_processors(void);
199 1.1 matt
/*
 * MI CPU iteration support.  On MULTIPROCESSOR kernels the walk starts
 * at the statically allocated cpu_info_store[0] and then follows the
 * cpu_infos[] array (once ncpu is nonzero) until a NULL entry ends the
 * loop.  On uniprocessor kernels the loop body runs exactly once with
 * curcpu().
 */
#define CPU_INFO_ITERATOR	cpuid_t
#ifdef MULTIPROCESSOR
#define CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#define CPU_INFO_FOREACH(cii, ci) \
	cii = 0, ci = &cpu_info_store[0]; \
	ci != NULL; \
	cii++, ncpu ? (ci = cpu_infos[cii]) \
	    : (ci = NULL)
#else
#define CPU_IS_PRIMARY(ci)	true
#define CPU_INFO_FOREACH(cii, ci) \
	(cii) = 0, (ci) = curcpu(); (cii) == 0; (cii)++
#endif

/* note: ignores the ci argument and uses the current LWP's pmap */
#define CPU_INFO_CURPMAP(ci)	(curlwp->l_proc->p_vmspace->vm_map.pmap)
215 1.1 matt
216 1.1 matt static inline void
217 1.1 matt cpu_dosoftints(void)
218 1.1 matt {
219 1.1 matt extern void dosoftints(void);
220 1.1 matt struct cpu_info * const ci = curcpu();
221 1.1 matt if (ci->ci_intr_depth == 0
222 1.1 matt && (ci->ci_data.cpu_softints >> ci->ci_cpl) > 0)
223 1.1 matt dosoftints();
224 1.1 matt }
225 1.1 matt
226 1.1 matt static inline bool
227 1.1 matt cpu_intr_p(void)
228 1.1 matt {
229 1.1 matt return curcpu()->ci_intr_depth > 0;
230 1.1 matt }
231 1.1 matt
232 1.1 matt #define LWP_PC(l) cpu_lwp_pc(l)
233 1.1 matt
234 1.1 matt vaddr_t cpu_lwp_pc(struct lwp *);
235 1.1 matt
/*
 * Idle the CPU: WFI stalls the hart until an interrupt becomes
 * pending.  The "memory" clobber stops the compiler from caching
 * memory values across the idle point.
 */
static inline void
cpu_idle(void)
{
	asm volatile("wfi" ::: "memory");
}
241 1.1 matt
242 1.4 matt #endif /* _KERNEL */
243 1.1 matt
244 1.1 matt #endif /* _RISCV_CPU_H_ */
245