/*	$NetBSD: cpu.h,v 1.118 2016/07/11 16:15:35 matt Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _CPU_H_
#define _CPU_H_

//#include <mips/cpuregs.h>

/*
 * Exported definitions unique to NetBSD/mips cpu support.
 */

#ifdef _LOCORE
#error Use assym.h to get definitions from <mips/cpu.h>
#endif

#ifdef _KERNEL

#if defined(_KERNEL_OPT)
#include "opt_cputype.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#endif

#include <sys/cpu_data.h>
#include <sys/device_if.h>
#include <sys/evcnt.h>
#include <sys/kcpuset.h>

typedef struct cpu_watchpoint {
	register_t	cw_addr;
	register_t	cw_mask;
	uint32_t	cw_asid;
	uint32_t	cw_mode;
} cpu_watchpoint_t;

/* (abstract) mode bits */
#define CPUWATCH_WRITE	__BIT(0)
#define CPUWATCH_READ	__BIT(1)
#define CPUWATCH_EXEC	__BIT(2)
#define CPUWATCH_MASK	__BIT(3)
#define CPUWATCH_ASID	__BIT(4)
#define CPUWATCH_RWX	(CPUWATCH_EXEC|CPUWATCH_READ|CPUWATCH_WRITE)

#define CPUWATCH_MAX	8	/* max possible number of watchpoints */

u_int		  cpuwatch_discover(void);
void		  cpuwatch_free(cpu_watchpoint_t *);
cpu_watchpoint_t *cpuwatch_alloc(void);
void		  cpuwatch_set_all(void);
void		  cpuwatch_clr_all(void);
void		  cpuwatch_set(cpu_watchpoint_t *);
void		  cpuwatch_clr(cpu_watchpoint_t *);

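/*
 * Illustrative usage sketch (not part of the interface above): how a
 * debugger back-end might claim one watchpoint, arm it to trap writes to
 * a single address, and later release it.  The variable "va" and the
 * exact programming order are assumptions, not API guarantees.
 *
 *	cpu_watchpoint_t *cw = cpuwatch_alloc();
 *	if (cw != NULL) {
 *		cw->cw_addr = (register_t)va;	// address to watch (hypothetical)
 *		cw->cw_mask = 0;		// exact match, no CPUWATCH_MASK
 *		cw->cw_asid = 0;		// unused unless CPUWATCH_ASID is set
 *		cw->cw_mode = CPUWATCH_WRITE;	// trap on stores only
 *		cpuwatch_set(cw);		// load into the CPU watch registers
 *	}
 *	...
 *	cpuwatch_clr(cw);			// disarm
 *	cpuwatch_free(cw);			// return the slot
 */
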
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	void *ci_nmi_stack;		/* NMI exception stack */
	struct cpu_softc *ci_softc;	/* chip-dependent hook */
	device_t ci_dev;		/* owning device */
	cpuid_t ci_cpuid;		/* Machine-level identifier */
	u_long ci_cctr_freq;		/* cycle counter frequency */
	u_long ci_cpu_freq;		/* CPU frequency */
	u_long ci_cycles_per_hz;	/* CPU freq / hz */
	u_long ci_divisor_delay;	/* for delay/DELAY */
	u_long ci_divisor_recip;	/* unused, for obsolete microtime(9) */
	struct lwp *ci_curlwp;		/* currently running lwp */
	volatile int ci_want_resched;	/* user preemption pending */
	int ci_mtx_count;		/* negative count of held mutexes */
	int ci_mtx_oldspl;		/* saved SPL value */
	int ci_idepth;			/* hardware interrupt depth */
	int ci_cpl;			/* current [interrupt] priority level */
	uint32_t ci_next_cp0_clk_intr;	/* for hard clock intr scheduling */
	struct evcnt ci_ev_count_compare;	/* hard clock intr counter */
	struct evcnt ci_ev_count_compare_missed; /* hard clock miss counter */
	struct lwp *ci_softlwps[SOFTINT_COUNT];
	volatile u_int ci_softints;
	struct evcnt ci_ev_fpu_loads;	/* fpu load counter */
	struct evcnt ci_ev_fpu_saves;	/* fpu save counter */
	struct evcnt ci_ev_dsp_loads;	/* dsp load counter */
	struct evcnt ci_ev_dsp_saves;	/* dsp save counter */
	struct evcnt ci_ev_tlbmisses;

	/*
	 * Per-cpu pmap information
	 */
	int ci_tlb_slot;		/* reserved tlb entry for cpu_info */
	u_int ci_pmap_asid_cur;		/* current ASID */
	struct pmap_tlb_info *ci_tlb_info; /* tlb information for this cpu */
	union pmap_segtab *ci_pmap_segtabs[2];
#define ci_pmap_user_segtab	ci_pmap_segtabs[0]
#define ci_pmap_kern_segtab	ci_pmap_segtabs[1]
#ifdef _LP64
	union pmap_segtab *ci_pmap_seg0tabs[2];
#define ci_pmap_user_seg0tab	ci_pmap_seg0tabs[0]
#define ci_pmap_kern_seg0tab	ci_pmap_seg0tabs[1]
#endif
	vaddr_t ci_pmap_srcbase;	/* starting VA of ephemeral src space */
	vaddr_t ci_pmap_dstbase;	/* starting VA of ephemeral dst space */

	u_int ci_cpuwatch_count;	/* number of watchpoints on this CPU */
	cpu_watchpoint_t ci_cpuwatch_tab[CPUWATCH_MAX];

#ifdef MULTIPROCESSOR
	volatile u_long ci_flags;
	volatile uint64_t ci_request_ipis;
					/* bitmask of IPIs requested */
					/* use on chips where hw cannot pass tag */
	uint64_t ci_active_ipis;	/* bitmask of IPIs being serviced */
	uint32_t ci_ksp_tlb_slot;	/* tlb entry for kernel stack */
	struct evcnt ci_evcnt_all_ipis;	/* aggregated IPI counter */
	struct evcnt ci_evcnt_per_ipi[NIPIS]; /* individual IPI counters */
	struct evcnt ci_evcnt_synci_activate_rqst;
	struct evcnt ci_evcnt_synci_onproc_rqst;
	struct evcnt ci_evcnt_synci_deferred_rqst;
	struct evcnt ci_evcnt_synci_ipi_rqst;

#define CPUF_PRIMARY	0x01	/* CPU is primary CPU */
#define CPUF_PRESENT	0x02	/* CPU is present */
#define CPUF_RUNNING	0x04	/* CPU is running */
#define CPUF_PAUSED	0x08	/* CPU is paused */
#define CPUF_USERPMAP	0x20	/* CPU has a user pmap activated */
#endif

};

#ifdef MULTIPROCESSOR
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci) \
	cii = 0, ci = cpu_infos[0]; cii < ncpu && (ci = cpu_infos[cii]) != NULL; cii++
#else
#define CPU_INFO_ITERATOR		int __unused
#define CPU_INFO_FOREACH(cii, ci) \
	ci = &cpu_info_store; ci != NULL; ci = NULL
#endif

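/*
 * Illustrative sketch: both definitions above are written to plug into
 * the standard MI iteration idiom, e.g.
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		// ci visits each attached CPU
 *		// (just cpu_info_store when !MULTIPROCESSOR)
 *	}
 */
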
/*
 * CTL_MACHDEP definitions.
 */
#define CPU_CONSDEV		1	/* dev_t: console terminal device */
#define CPU_BOOTED_KERNEL	2	/* string: booted kernel name */
#define CPU_ROOT_DEVICE		3	/* string: root device name */
#define CPU_LLSC		4	/* OS/CPU supports LL/SC instruction */
#define CPU_LMMI		5	/* Loongson multimedia instructions */

/*
 * Platform can override, but note this breaks userland compatibility
 * with other mips platforms.
 */
#ifndef CPU_MAXID
#define CPU_MAXID		5	/* number of valid machdep ids */
#endif

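/*
 * Illustrative userland sketch (relies on the ordinary sysctl(3)
 * interface, not on anything defined in this header): the ids above name
 * nodes under CTL_MACHDEP, so the booted kernel path could be fetched with
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */
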
/* Note: must be kept in sync with -ffixed-?? in Makefile.mips. */
// MIPS_CURLWP moved to <mips/regdef.h>
#define MIPS_CURLWP_QUOTED	"$24"
#define MIPS_CURLWP_LABEL	_L_T8
#define MIPS_CURLWP_REG		_R_T8

extern struct cpu_info cpu_info_store;
#ifdef MULTIPROCESSOR
extern struct cpu_info *cpuid_infos[];
#endif
register struct lwp *mips_curlwp asm(MIPS_CURLWP_QUOTED);

#define	curlwp		mips_curlwp
#define	curcpu()	lwp_getcpu(curlwp)
#define	curpcb		((struct pcb *)lwp_getpcb(curlwp))
#ifdef MULTIPROCESSOR
#define	cpu_number()		(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
#else
#define	cpu_number()		(0)
#define	CPU_IS_PRIMARY(ci)	(true)
#endif

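/*
 * Illustrative sketch: because mips_curlwp is a global register variable
 * pinned to t8 ($24), the common MI accessors expand to cheap register and
 * memory operations, roughly:
 *
 *	struct lwp *l = curlwp;			// read $t8 directly
 *	struct cpu_info *ci = curcpu();		// lwp_getcpu(l), i.e. l->l_cpu
 *	int n = cpu_number();			// ci->ci_index on MP, 0 otherwise
 */
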
/*
 * Definitions of cpu-dependent requirements
 * referenced in generic code.
 */

/*
 * Send an inter-processor interrupt to each other CPU (excludes curcpu()).
 */
void cpu_broadcast_ipi(int);

/*
 * Send an inter-processor interrupt to the CPUs in kcpuset (excludes curcpu()).
 */
void cpu_multicast_ipi(const kcpuset_t *, int);

/*
 * Send an inter-processor interrupt to another CPU.
 */
int cpu_send_ipi(struct cpu_info *, int);

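/*
 * Illustrative sketch: building a target set for cpu_multicast_ipi() with
 * the kcpuset(9) API.  "ci" and "ipi" are placeholders for a target CPU
 * and one of the platform's IPI numbers.
 *
 *	kcpuset_t *kcp;
 *
 *	kcpuset_create(&kcp, true);		// zeroed set
 *	kcpuset_set(kcp, cpu_index(ci));	// add one target CPU
 *	cpu_multicast_ipi(kcp, ipi);		// curcpu() is skipped
 *	kcpuset_destroy(kcp);
 */
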
/*
 * cpu_intr(ppl, pc, status);  (most state needed by clockframe)
 */
void cpu_intr(int, vaddr_t, uint32_t);

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	vaddr_t		pc;	/* program counter at time of interrupt */
	uint32_t	sr;	/* status register at time of interrupt */
	bool		intr;	/* interrupted an interrupt */
};

/*
 * A port must provide CLKF_USERMODE() for use in machine-independent code.
 * The status bits it tests differ on r4000 and r3000 systems; provide them
 * in the port-dependent file that includes this one, using the macros below.
 */
uint32_t cpu_clkf_usermode_mask(void);

#define CLKF_USERMODE(framep)	((framep)->sr & cpu_clkf_usermode_mask())
#define CLKF_PC(framep)		((framep)->pc + 0)
#define CLKF_INTR(framep)	((framep)->intr + 0)

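/*
 * Illustrative sketch of what the per-port hook might look like; the
 * status-register constant is an assumption, not taken from this header:
 *
 *	uint32_t
 *	cpu_clkf_usermode_mask(void)
 *	{
 *		// e.g. the KSU "user" bits of an r4000-style status register
 *		return MIPS_SR_KSU_USER;	// hypothetical constant name
 *	}
 *
 * CLKF_USERMODE() then tests those bits in the saved clockframe sr.
 */
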
/*
 * Misc prototypes and variable declarations.
 */
#define	LWP_PC(l)	cpu_lwp_pc(l)

struct proc;
struct lwp;
struct pcb;
struct reg;

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void	cpu_need_resched(struct cpu_info *, int);
/*
 * Notify the current lwp (l) that it has a signal pending,
 * to be processed as soon as possible.
 */
void	cpu_signotify(struct lwp *);

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the MIPS, request an AST to send us
 * through trap, marking the proc as needing a profiling tick.
 */
void	cpu_need_proftick(struct lwp *);
void	cpu_set_curpri(int);

/* VM related hooks */
void	cpu_boot_secondary_processors(void);
void *	cpu_uarea_alloc(bool);
bool	cpu_uarea_free(void *);
void	cpu_proc_fork(struct proc *, struct proc *);
vaddr_t	cpu_lwp_pc(struct lwp *);
#ifdef _LP64
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */