/*	$NetBSD: cpu.h,v 1.51 2006/01/27 18:37:49 cdi Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "booted_kernel", CTLTYPE_STRING }, \
	{ "booted_device", CTLTYPE_STRING }, \
	{ "boot_args", CTLTYPE_STRING }, \
	{ "cpu_arch", CTLTYPE_INT }, \
}
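
/*
 * Example (illustrative sketch, not part of this header): userland can read
 * these nodes through sysctl(3), e.g. the name of the booted kernel.  The
 * buffer size below is an assumption made for the sketch.
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[512];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl machdep.booted_kernel");
 */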

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/cpu_data.h>
#include <sys/cc_microtime.h>
/*
 * The cpu_info structure is part of a 64KB structure that is mapped both by
 * the kernel pmap and by a single locked TTE at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only from that processor; other processors can reach it through
 * the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;
	struct pcb		*ci_idle_u;

	/* Spinning up the CPU */
	void			(*ci_spinup) __P((void));
	void			*ci_initstack;
	paddr_t			ci_paddr;

	int			ci_number;
	int			ci_upaid;
	int			ci_cpuid;

	/*
	 * Variables used by cc_microtime().
	 */
	struct cc_microtime_state ci_cc;

	/* CPU PROM information. */
	u_int			ci_node;

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;

	struct cpu_data		ci_data;	/* MI per-cpu data */
};

#define	CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;

	void	*cb_initstack;
};

extern struct cpu_bootargs *cpu_args;

extern int sparc_ncpus;
extern struct cpu_info *cpus;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_number)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
					ci = ci->ci_next
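
/*
 * Example (a sketch, not part of this header): MI code walks all attached
 * CPUs with the iterator pair above.  The macro expands to the three clauses
 * of a for statement, so it is used as
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *	int ncpu = 0;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		ncpu++;
 *
 * Secondary CPUs can be skipped by testing CPU_IS_PRIMARY(ci).
 */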

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)	/* nothing */
#define	cpu_swapout(p)	/* nothing */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;
extern u_long mp_tramp_ci;

void	cpu_hatch __P((void));
void	cpu_boot_secondary_processors __P((void));
#endif

/*
 * definitions for MI microtime().
 */
#define	microtime(tv)	cc_microtime(tv)

extern uint64_t cpu_clockrate[];

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];
extern int eintstack[];
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK - 0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK - 0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
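
/*
 * A worked note on the test above (informational, assuming the standard
 * SPARC V9 stack discipline): bit 0 of the saved stack pointer flags a
 * 64-bit frame, whose %sp is stored with BIAS (0x7ff = 2047) subtracted,
 * so the actual frame address is tf_out[6] + 0x7ff.  Testing
 *
 *	INTSTACK - 0x7ff < tf_out[6] < EINTSTACK - 0x7ff
 *
 * is therefore equivalent to testing the unbiased address against the
 * interrupt stack bounds:
 *
 *	INTSTACK < tf_out[6] + 0x7ff < EINTSTACK
 */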


extern struct intrhand soft01intr, soft01net, soft01clock;

void	setsoftint __P((void));
void	setsoftnet __P((void));

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

278 * Notify the current process (p) that it has a signal pending,
279 * process as soon as possible.
280 */
281 #define signotify(p) (want_ast = 1)

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun) __P((void *));
	void	*ih_arg;
	short	ih_number;		/* interrupt number */
					/* the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct intrhand *ih_next;	/* global list */
	struct intrhand *ih_pending;	/* interrupt queued */
	volatile u_int64_t *ih_map;	/* Interrupt map reg */
	volatile u_int64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish __P((int level, struct intrhand *));
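
/*
 * Example (a sketch only; "xx_intr", "sc", "ino" and "pil" are hypothetical
 * driver names): a device driver typically allocates a struct intrhand,
 * fills in the handler, its argument and the interrupt number/priority,
 * and registers it at its interrupt level.
 *
 *	ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	ih->ih_fun = xx_intr;		hardware interrupt handler
 *	ih->ih_arg = sc;		driver softc, passed to xx_intr()
 *	ih->ih_number = ino;		interrupt number from the bus
 *	ih->ih_pil = pil;		priority (IPL) to dispatch at
 *	intr_establish(pil, ih);
 *
 * The handler should return 1 if it serviced the interrupt and 0 otherwise.
 */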

/* cpu.c */
paddr_t	cpu_alloc __P((void));
void	cpu_start __P((int));

#define	mp_pause_cpus()		sparc64_ipi_pause_cpus()
#define	mp_resume_cpus()	sparc64_ipi_resume_cpus()

/* disksubr.c */
struct dkbad;
int	isbad __P((struct dkbad *bt, int, int, int));
/* machdep.c */
int	ldcontrolb __P((caddr_t));
void	dumpconf __P((void));
caddr_t	reserve_dumppages __P((caddr_t));
/* clock.c */
struct timeval;
int	tickintr __P((void *));		/* level 10 (tick) interrupt code */
int	clockintr __P((void *));	/* level 10 (clock) interrupt code */
int	statintr __P((void *));		/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate __P((struct fpstate64 *));
void	loadfpstate __P((struct fpstate64 *));
u_int64_t	probeget __P((paddr_t, int, int));
int	probeset __P((paddr_t, int, int, u_int64_t));

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

void	proc_trampoline __P((void));
struct pcb;
void	snapshot __P((struct pcb *));
struct frame *getfp __P((void));
int	xldcontrolb __P((caddr_t, struct pcb *));
void	copywords __P((const void *, void *, size_t));
void	qcopy __P((const void *, void *, size_t));
void	qzero __P((void *, size_t));
void	switchtoctx __P((int));
/* locore2.c */
void	remrq __P((struct proc *));
/* trap.c */
void	kill_user_windows __P((struct lwp *));
int	rwindow_save __P((struct lwp *));
/* cons.c */
int	cnrom __P((void));
/* zs.c */
void	zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
#ifdef KGDB
void	zs_kgdb_init __P((void));
#endif
/* fb.c */
void	fb_unblank __P((void));
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void	kgdb_connect __P((int));
void	kgdb_panic __P((void));
#endif
/* emul.c */
int	fixalign __P((struct lwp *, struct trapframe64 *));
int	emulinstr __P((vaddr_t, struct trapframe64 *));

/*
 * The SPARC V9 has a Trap Base Address (TBA) register which holds the
 * upper bits of the address of the trap vector table.  The trap type is
 * supplied by the hardware when the trap occurs, and the bottom five bits
 * of each vector address are always zero, so each trap vector holds up to
 * 32 bytes of executable code---exactly eight instructions.
 *
 * The hardware allocates half the trap vectors to hardware traps and half
 * to software traps.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[8];		/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the trap vectors */
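
/*
 * For illustration (assuming the layout described above): the vector for
 * trap type "tt" is the eight instructions trapbase[tt].tv_instr[0..7],
 * i.e. it starts tt * sizeof(struct trapvec) = tt * 32 bytes past the
 * table base taken from the TBA register.
 */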

#endif /* _KERNEL */
#endif /* _CPU_H_ */