/*	$NetBSD: cpu.h,v 1.107 2014/01/07 20:11:35 palle Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_
/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_VIS			5	/* 0 - no VIS, 1 - VIS 1.0, etc. */
#define	CPU_MAXID		6	/* number of valid machdep ids */
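
/*
 * A minimal userland sketch of how these ids are typically consumed through
 * sysctl(3); the variable names below are illustrative only:
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_ARCH };
 *	int arch;
 *	size_t len = sizeof(arch);
 *
 *	if (sysctl(mib, 2, &arch, &len, NULL, 0) == 0)
 *		printf("cpu architecture version: %d\n", arch);
 */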

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and by a single locked TTE at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors can access it
 * through the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	struct cpu_data		ci_data;	/* MI per-cpu data */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];	/* %tick */
	uint64_t		ci_system_clockrate[2];	/* %stick */

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;

	/* This could be under MULTIPROCESSOR, but there's no good reason */
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

	/*
	 * A context is simply a small number that differentiates multiple
	 * mappings of the same address.  Contexts on the spitfire are
	 * 13 bits, but could be as large as 17 bits.
	 *
	 * Each context is either free or attached to a pmap.
	 *
	 * The context table is an array of pointers to psegs.  Just
	 * dereference the right pointer and you get to the pmap segment
	 * tables.  These are physical addresses, of course.
	 *
	 * ci_ctx_lock protects this CPU's context allocation/free.
	 * These fields are almost all allocated within the same cacheline.
	 */
	kmutex_t		ci_ctx_lock;
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;
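
	/*
	 * A hedged sketch of the lookup described in the comment above
	 * (the "ctx" index is illustrative): for an allocated context the
	 * table entry holds the physical address of that pmap's segment
	 * table.
	 *
	 *	paddr_t pseg_pa = curcpu()->ci_ctxbusy[ctx];
	 */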

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus).  These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

#ifdef SUN4V
	/* MMU Fault Status Area.  Will be initialized to the physical
	   address of the bottom of the interrupt stack. */
	paddr_t			ci_mmfsa;
#endif

	/* probe fault in PCI config space reads */
	bool			ci_pci_probe;
	bool			ci_pci_fault;

	volatile void		*ci_ddb_regs;	/* DDB regs */
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define	CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;
};

extern struct cpu_bootargs *cpu_args;

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define	sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR		int __unused
#define	CPU_INFO_FOREACH(cii, ci)	ci = cpus; ci != NULL; ci = ci->ci_next
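
/*
 * A minimal sketch of how MI code typically walks the cpu list with the
 * macros above (the printf is illustrative only):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		printf("cpu at PROM node %x\n", ci->ci_node);
 */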

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void	cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void	cpu_pmap_prepare(struct cpu_info *, bool);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;
extern u_long mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just this cpu
 * The called functions do not follow the C ABI, so they need to be coded
 * in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
void	sparc64_send_ipi(int, ipifunc_t, uint64_t, uint64_t);

/*
 * Call an arbitrary C function on another cpu (or all others but ourself)
 */
typedef void (*ipi_c_call_func_t)(void*);
void	sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);

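/*
 * A hedged usage sketch for the C-level cross call above; the callback and
 * its argument are made-up names:
 *
 *	static void
 *	example_flush(void *arg)
 *	{
 *		... runs on the target cpu ...
 *	}
 *
 *	sparc64_generic_xcall(target_ci, example_flush, NULL);
 */
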
#endif

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep))&&					\
		(((framep)->t.tf_out[6] & 1 ) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK-0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK-0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
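
/*
 * A minimal sketch of how MI clock code consumes these macros (hedged,
 * not the literal hardclock() source):
 *
 *	if (CLKF_USERMODE(frame))
 *		... account the tick to user time ...
 *	else if (CLKF_INTR(frame))
 *		... account the tick to interrupt time ...
 *	else
 *		... account the tick to system time ...
 */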

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
void cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	/* if we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg */
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
	short	ih_number;		/* interrupt number */
					/* the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct	intrhand *ih_next;	/* global list */
	struct	intrhand *ih_pending;	/* interrupt queued */
	volatile uint64_t *ih_map;	/* Interrupt map reg */
	volatile uint64_t *ih_clr;	/* clear interrupt reg */
	struct	evcnt ih_cnt;		/* counter for vmstat */
	uint32_t ih_ivec;
	char	ih_name[32];		/* name for the above */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
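
/*
 * A hedged sketch of how a driver typically hooks a hardware interrupt with
 * the structure above; the handler, softc and numbers are illustrative:
 *
 *	struct intrhand *ih = ... allocate and zero an intrhand ...;
 *
 *	ih->ih_fun = mydriver_intr;	(hypothetical handler)
 *	ih->ih_arg = sc;		(hypothetical softc)
 *	ih->ih_number = ino;		(hypothetical interrupt number)
 *	ih->ih_pil = pil;
 *	intr_establish(pil, false, ih);
 */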

/* cpu.c */
int	cpu_myid(void);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void	*reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	stickintr(void *);	/* system tick interrupt code */
int	stick2eintr(void *);	/* system tick interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
void	stickintr_establish(int, int (*)(void *));
void	stick2eintr_establish(int, int (*)(void *));

/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t	probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx_us(int);
void	switchtoctx_usiii(int);
void	next_tick(long);
void	next_stick(long);
/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#else /* _KERNEL */

/*
 * XXX: provide some definitions for crash(8), probably can share
 */
#if defined(_KMEMUSER)
#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	curlwp		curcpu()->ci_curlwp
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */