/*	$NetBSD: cpu.h,v 1.37 2003/11/09 05:29:59 tsutsui Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley.
10 *
11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement:
13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)cpu.h 8.4 (Berkeley) 1/5/94
41 */
42
43 #ifndef _CPU_H_
44 #define _CPU_H_
45
/*
 * CTL_MACHDEP definitions: identifiers for the machine-dependent
 * sysctl nodes, plus the name/type table sysctl(8) uses to map
 * names to the ids.  CPU_MAXID must stay one past the last valid id,
 * and the table below must stay in the same order as the ids.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "booted_kernel", CTLTYPE_STRING }, \
	{ "booted_device", CTLTYPE_STRING }, \
	{ "boot_args", CTLTYPE_STRING }, \
	{ "cpu_arch", CTLTYPE_INT }, \
}
62
63 #ifdef _KERNEL
64 /*
65 * Exported definitions unique to SPARC cpu support.
66 */
67
68 #if defined(_KERNEL_OPT)
69 #include "opt_multiprocessor.h"
70 #include "opt_lockdebug.h"
71 #endif
72
73 #include <machine/psl.h>
74 #include <machine/reg.h>
75 #include <machine/intr.h>
76 #include <sparc64/sparc64/intreg.h>
77
78 #include <sys/sched.h>
/*
 * The cpu_info structure is part of a 64KB structure mapped into both the
 * kernel pmap and, via a single locked TTE, at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is therefore accessible
 * at CPUINFO_VA only on that processor; other processors can reach it
 * through the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */
94
struct cpu_info {
	/*
	 * Most important fields first: these are referenced from
	 * assembly on every trap/context switch, so keep them at
	 * small, stable offsets from the start of the structure.
	 */
	struct lwp	*ci_curlwp;	/* lwp currently running here */
	struct pcb	*ci_cpcb;	/* current pcb */
	struct cpu_info	*ci_next;	/* next cpu_info in the system list */

	struct lwp	*ci_fplwp;	/* lwp whose state is in the FPU */
	int		ci_number;	/* logical CPU number (see cpu_number()) */
	int		ci_upaid;	/* hardware CPU id; presumably the
					 * UPA module id — verify in cpu.c */
	struct schedstate_percpu ci_schedstate;	/* MI per-CPU scheduler state */

	/*
	 * Variables used by cc_microtime().
	 * NOTE(review): field roles below inferred from the names;
	 * confirm against cc_microtime()/cc_microset() in clock.c.
	 */
	struct timeval	ci_cc_time;	/* time at last calibration */
	int64_t		ci_cc_cc;	/* cycle-counter reading then */
	int64_t		ci_cc_ms_delta;	/* cycle delta per interval */
	int64_t		ci_cc_denom;	/* interpolation denominator */

	/* DEBUG/DIAGNOSTIC stuff */
	u_long		ci_spin_locks;	/* # of spin locks held */
	u_long		ci_simple_locks;	/* # of simple locks held */

	/* Spinning up the CPU */
	void		(*ci_spinup) __P((void));	/* start-up routine */
	void		*ci_initstack;	/* initial stack for this CPU */
	paddr_t		ci_paddr;	/* physical address of this structure */
};
123
extern struct cpu_info *cpus;		/* list of all cpu_info structures */
extern struct cpu_info cpu_info_store;	/* static instance for the boot CPU */

/*
 * curcpu(): pointer to the running CPU's cpu_info.  The per-CPU
 * CPUINFO_VA mapping variant is disabled for now; the single static
 * instance is used instead (effectively uniprocessor).
 */
#if 1
#define	curcpu()	(&cpu_info_store)
#else
#define	curcpu()	((struct cpu_info *)CPUINFO_VA)
#endif
132
/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)	/* nothing */
#define	cpu_swapout(p)	/* nothing */
#define	cpu_wait(p)	/* nothing */
/* cpu_number() is hardwired to 0 while the curcpu() variant is disabled. */
#if 1
#define	cpu_number()	0
#else
#define	cpu_number()	(curcpu()->ci_number)
#endif

/* This really should be somewhere else. */
#define	cpu_proc_fork(p1, p2)	/* nothing */
148
/*
 * definitions for MI microtime(): the MI entry point is redirected to
 * cc_microtime(), with cc_microset() doing the periodic per-CPU
 * calibration (see the ci_cc_* fields in struct cpu_info).
 */
extern struct timeval cc_microset_time;
#define	microtime(tv)	cc_microtime(tv)
void	cc_microtime __P((struct timeval *));
void	cc_microset __P((struct cpu_info *));

extern uint64_t cpu_clockrate[];	/* CPU clock rate(s); see clock.c */
158
/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];		/* interrupt stack; bounds used by CLKF_INTR */
extern int eintstack[];		/* end of the interrupt stack */
struct clockframe {
	struct trapframe64 t;	/* the saved trap state */
};

/* True iff the interrupted context was unprivileged (user mode). */
#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/*
 * CLKF_INTR: true when the interrupted code was itself running on the
 * interrupt stack, i.e. its saved stack pointer (%o6, tf_out[6]) lies
 * between INTSTACK and EINTSTACK.  An odd stack pointer marks a 64-bit
 * frame offset by the stack bias, so the bounds are biased to match.
 * Since some files in sys/kern do not know BIAS, I'm using 0x7ff here.
 */
#define	CLKF_INTR(framep) \
	((!CLKF_USERMODE(framep))&& \
		(((framep)->t.tf_out[6] & 1 ) ? \
			(((vaddr_t)(framep)->t.tf_out[6] < \
				(vaddr_t)EINTSTACK-0x7ff) && \
			((vaddr_t)(framep)->t.tf_out[6] > \
				(vaddr_t)INTSTACK-0x7ff)) : \
			(((vaddr_t)(framep)->t.tf_out[6] < \
				(vaddr_t)EINTSTACK) && \
			((vaddr_t)(framep)->t.tf_out[6] > \
				(vaddr_t)INTSTACK))))
201
/*
 * Software interrupt request `register'.
 */
#ifdef DEPRECATED
/* XXX(review): declares a variable (`sir') in a header; dead code anyway. */
union sir {
	int	sir_any;
	char	sir_which[4];
} sir;

#define SIR_NET		0
#define SIR_CLOCK	1
#endif

/* Statically allocated handlers for the traditional soft interrupts. */
extern struct intrhand soft01intr, soft01net, soft01clock;

/* Soft-interrupt triggers: real functions for now, macros disabled. */
#if 0
#define setsoftint() send_softint(-1, IPL_SOFTINT, &soft01intr)
#define setsoftnet() send_softint(-1, IPL_SOFTNET, &soft01net)
#else
void setsoftint __P((void));
void setsoftnet __P((void));
#endif
224
/*
 * Asynchronous software trap (AST) request flag, checked on the way
 * back to user mode.
 *
 * NOTE(review): want_ast, want_resched and fplwp were tentative
 * definitions here (`int want_ast;`), so every .c file including this
 * header emitted a common symbol.  That relies on the linker merging
 * commons and breaks outright under -fno-common (the modern compiler
 * default).  Declare them `extern' instead; the single definitions
 * live in the machine-dependent .c/.s files — verify they exist there.
 */
extern int want_ast;

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
extern int want_resched;		/* resched() was called */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	signotify(p)		(want_ast = 1)

/*
 * Only one process may own the FPU state.
 *
 * XXX this must be per-cpu (eventually)
 */
extern struct lwp *fplwp;		/* FPU owner */
253
/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun) __P((void *));	/* handler; see calling rule above */
	void	*ih_arg;		/* argument, or NULL for a clockframe */
	short	ih_number;		/* interrupt number */
					/* the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct intrhand	*ih_next;	/* global list */
	struct intrhand	*ih_pending;	/* interrupt queued */
	volatile u_int64_t *ih_map;	/* Interrupt map reg */
	volatile u_int64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];	/* chains; presumably indexed by PIL — verify */
extern struct intrhand *intrlev[MAXINTNUM];	/* handlers by interrupt number */

void	intr_establish __P((int level, struct intrhand *));
275
/*
 * Prototypes for machine-dependent routines referenced from MI code,
 * grouped by the source file that implements each of them.
 */
/* cpu.c */
paddr_t	cpu_alloc __P((void));
u_int64_t cpu_init __P((paddr_t, int));
/* disksubr.c */
struct dkbad;
int isbad __P((struct dkbad *bt, int, int, int));
/* machdep.c */
int	ldcontrolb __P((caddr_t));
void	dumpconf __P((void));
caddr_t	reserve_dumppages __P((caddr_t));
/* clock.c */
struct timeval;
int	tickintr __P((void *));	/* level 10 (tick) interrupt code */
int	clockintr __P((void *));/* level 10 (clock) interrupt code */
int	statintr __P((void *));	/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate __P((struct fpstate64 *));
void	loadfpstate __P((struct fpstate64 *));
u_int64_t	probeget __P((paddr_t, int, int));
int	probeset __P((paddr_t, int, int, u_int64_t));
/* Window-flush helpers: the flushw instruction does it all in one go. */
#if 0
void	write_all_windows __P((void));
void	write_user_windows __P((void));
#else
#define	write_all_windows() __asm __volatile("flushw" : : )
#define	write_user_windows() __asm __volatile("flushw" : : )
#endif
void	proc_trampoline __P((void));
struct pcb;
void	snapshot __P((struct pcb *));
struct frame *getfp __P((void));
int	xldcontrolb __P((caddr_t, struct pcb *));
void	copywords __P((const void *, void *, size_t));
void	qcopy __P((const void *, void *, size_t));
void	qzero __P((void *, size_t));
void	switchtoctx __P((int));
/* locore2.c */
void	remrq __P((struct proc *));
/* trap.c */
void	kill_user_windows __P((struct lwp *));
int	rwindow_save __P((struct lwp *));
/* amd7930intr.s */
void	amd7930_trap __P((void));
/* cons.c */
int	cnrom __P((void));
/* zs.c */
void zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
#ifdef KGDB
void zs_kgdb_init __P((void));
#endif
/* fb.c */
void	fb_unblank __P((void));
/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void kgdb_connect __P((int));
void kgdb_panic __P((void));
#endif
/* emul.c */
int	fixalign __P((struct lwp *, struct trapframe64 *));
int	emulinstr __P((vaddr_t, struct trapframe64 *));
338
/*
 *
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

/*
 * NOTE(review): the prose above says four instructions per vector,
 * but tv_instr holds eight; the text looks inherited from sparc(32) —
 * confirm against the V9 trap-table layout.
 */
struct trapvec {
	int	tv_instr[8];	/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the 256 vectors */

/* Bulk zero/copy helpers; presumably word-at-a-time — see locore. */
extern void wzero __P((void *, u_int));
extern void wcopy __P((const void *, void *, u_int));
360
361 #endif /* _KERNEL */
362 #endif /* _CPU_H_ */
363