/*	$NetBSD: cpu.h,v 1.117 2015/09/06 23:48:39 nakayama Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define CPU_BOOT_ARGS		3	/* string: args booted with */
#define CPU_ARCH		4	/* integer: cpu architecture version */
#define CPU_VIS			5	/* 0 - no VIS, 1 - VIS 1.0, etc. */
#define CPU_MAXID		6	/* number of valid machdep ids */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#endif
#ifdef SUN4V
#include <machine/hypervisor.h>
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure that is mapped both by
 * the kernel pmap and by a single locked TTE at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors can reach it through
 * an additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	struct cpu_data		ci_data;	/* MI per-cpu data */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];	/* %tick */
	uint64_t		ci_system_clockrate[2];	/* %stick */

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;

	/* This could be under MULTIPROCESSOR, but there's no good reason */
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

	/*
	 * A context is simply a small number that differentiates multiple
	 * mappings of the same address.  Contexts on the Spitfire are
	 * 13 bits, but could be as large as 17 bits.
	 *
	 * Each context is either free or attached to a pmap.
	 *
	 * The context table is an array of pointers to psegs.  Just
	 * dereference the right pointer and you get to the pmap segment
	 * tables.  These are physical addresses, of course.  (A usage
	 * sketch follows the structure definition below.)
	 *
	 * ci_ctx_lock protects this CPU's context allocation/free.
	 * These fields are all allocated almost within the same cacheline.
	 */
	kmutex_t		ci_ctx_lock;
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus).  These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	/* TSB description (sun4v). */
	struct tsb_desc		*ci_tsb_desc;

	/*
	 * MMU Fault Status Area (sun4v).
	 * Will be initialized to the physical address of the bottom of
	 * the interrupt stack.
	 */
	paddr_t			ci_mmfsa;

	/*
	 * sun4v mondo control fields
	 */
	paddr_t			ci_cpumq;	/* cpu mondo queue address */
	paddr_t			ci_devmq;	/* device mondo queue address */
	paddr_t			ci_cpuset;	/* mondo recipient address */
	paddr_t			ci_mondo;	/* mondo message address */

	/* probe fault in PCI config space reads */
	bool			ci_pci_probe;
	bool			ci_pci_fault;

	volatile void		*ci_ddb_regs;	/* DDB regs */
};
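
/*
 * Illustration of the context table described in struct cpu_info above.
 * This is only a hedged sketch, not code used by this header; "ctx" and
 * "ctx_to_segtab" are made-up names.  Each context number allocated on
 * this CPU indexes ci_ctxbusy, whose entry holds the physical address of
 * the owning pmap's segment table, so the lookup is a single dereference:
 *
 *	static inline paddr_t
 *	ctx_to_segtab(struct cpu_info *ci, int ctx)
 *	{
 *		KASSERT(ctx >= 0 && ctx < ci->ci_numctx);
 *		return ci->ci_ctxbusy[ctx];
 *	}
 *
 * Allocating or freeing a context must be done under ci_ctx_lock, since
 * each CPU manages its own context numbers and ci_pmap_ctxlist.
 */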

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;
	int	cb_cputyp;
};

extern struct cpu_bootargs *cpu_args;

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define	curcpu()		(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()		(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR	int __unused
#define	CPU_INFO_FOREACH(cii, ci)	ci = cpus; ci != NULL; ci = ci->ci_next
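
/*
 * Usage sketch (illustrative only, not part of this header): walking every
 * CPU with the iterator macros above and skipping the calling CPU, which
 * curcpu() finds by reading the ci_self back-pointer through the fixed
 * CPUINFO_VA alias:
 *
 *	struct cpu_info *ci;
 *	CPU_INFO_ITERATOR cii;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (ci == curcpu())
 *			continue;	// ourselves; reached via the alias
 *		// ... access the other CPU through its global mapping ...
 *	}
 */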

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void cpu_pmap_prepare(struct cpu_info *, bool);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char   *mp_tramp_code;
extern u_long  mp_tramp_code_len;
extern u_long  mp_tramp_dtlb_slots, mp_tramp_itlb_slots;
extern u_long  mp_tramp_func;
extern u_long  mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/* Helper functions to retrieve cache info */
int	cpu_ecache_associativity(int node);
int	cpu_ecache_size(int node);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send      - send to just this cpu
 * The called functions do not follow the C ABI, so they need to be coded
 * in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
extern void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t);
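
/*
 * Usage sketch (illustrative only; "sparc64_ipi_example" stands for an
 * assembler-coded handler and "target_set" for a sparc64_cpuset_t built by
 * the caller, neither of which is declared here).  Because the handlers
 * bypass the C ABI, only assembly routines may be passed, along with two
 * raw 64-bit arguments:
 *
 *	extern void sparc64_ipi_example(void *, void *);	// asm code
 *
 *	sparc64_broadcast_ipi(sparc64_ipi_example, 0, 0);
 *	sparc64_multicast_ipi(target_set, sparc64_ipi_example, 0, 0);
 */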

/*
 * Call an arbitrary C function on another cpu (or on all others but
 * ourselves).
 */
typedef void (*ipi_c_call_func_t)(void *);
void	sparc64_generic_xcall(struct cpu_info *, ipi_c_call_func_t, void *);
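
/*
 * Usage sketch (illustrative only; "xcall_note" is a made-up C function):
 * unlike the raw IPIs above, sparc64_generic_xcall() runs an ordinary C
 * function on the target CPU, so no assembler is required:
 *
 *	static void
 *	xcall_note(void *arg)
 *	{
 *		printf("xcall on cpu%d\n", cpu_number());
 *	}
 *
 *	sparc64_generic_xcall(ci, xcall_note, NULL);
 */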

#endif

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK - 0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK - 0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
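
/*
 * Usage sketch (illustrative only; the handler and counters are made up):
 * a clock interrupt handler receives the saved trapframe as an opaque
 * clockframe and can classify the interrupted context with the macros
 * above:
 *
 *	int
 *	example_clockintr(void *cap)
 *	{
 *		struct clockframe *cf = cap;
 *
 *		if (CLKF_USERMODE(cf))
 *			user_ticks++;		// interrupted user code
 *		else if (CLKF_INTR(cf))
 *			nested_ticks++;		// was on the interrupt stack
 *		else
 *			system_ticks++;		// kernel, base level
 *		return 1;
 *	}
 */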

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, to be processed as soon as
 * possible.
 */
void cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int			(*ih_fun)(void *);
	void			*ih_arg;
	/*
	 * if we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg
	 */
	int			(*ih_realfun)(void *);
	void			*ih_realarg;
	short			ih_number;	/* interrupt number */
						/* the H/W provides */
	char			ih_pil;		/* interrupt priority */
	struct intrhand		*ih_next;	/* global list */
	struct intrhand		*ih_pending;	/* interrupt queued */
	volatile uint64_t	*ih_map;	/* Interrupt map reg */
	volatile uint64_t	*ih_clr;	/* clear interrupt reg */
	struct evcnt		ih_cnt;		/* counter for vmstat */
	uint32_t		ih_ivec;
	char			ih_name[32];	/* name for the above */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
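
/*
 * Usage sketch (illustrative only; the softc, helpers, "pil" and "ino" are
 * made up): a driver fills in an intrhand with its handler, argument and
 * priority, then registers it with intr_establish().  Handlers return 0
 * for ``not me'' and 1 for ``I took care of it'':
 *
 *	static int
 *	mydev_intr(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (!mydev_pending(sc))
 *			return 0;		// not our interrupt
 *		mydev_ack(sc);
 *		return 1;			// handled
 *	}
 *
 *	ih->ih_fun = mydev_intr;
 *	ih->ih_arg = sc;
 *	ih->ih_number = ino;			// from the bus glue
 *	ih->ih_pil = pil;			// chosen interrupt priority
 *	intr_establish(pil, true, ih);
 */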

/* cpu.c */
int	cpu_myid(void);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void	*reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	stickintr(void *);	/* system tick interrupt code */
int	stick2eintr(void *);	/* system tick interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
void	stickintr_establish(int, int (*)(void *));
void	stick2eintr_establish(int, int (*)(void *));

/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);
void	setcputyp(int);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx_us(int);
void	switchtoctx_usiii(int);
void	next_tick(long);
void	next_stick(long);
/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#else /* _KERNEL */

/*
 * XXX: provide some definitions for crash(8), probably can share
 */
#if defined(_KMEMUSER)
#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	curlwp		curcpu()->ci_curlwp
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */