cpu.h revision 1.38 1 /* $NetBSD: cpu.h,v 1.38 2003/11/15 05:24:51 petrov Exp $ */
2
3 /*
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley.
10 *
11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement:
13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 *
40 * @(#)cpu.h 8.4 (Berkeley) 1/5/94
41 */
42
43 #ifndef _CPU_H_
44 #define _CPU_H_
45
46 /*
47 * CTL_MACHDEP definitions.
48 */
49 #define CPU_BOOTED_KERNEL 1 /* string: booted kernel name */
50 #define CPU_BOOTED_DEVICE 2 /* string: device booted from */
51 #define CPU_BOOT_ARGS 3 /* string: args booted with */
52 #define CPU_ARCH 4 /* integer: cpu architecture version */
53 #define CPU_MAXID 5 /* number of valid machdep ids */
54
/*
 * sysctl name table for CTL_MACHDEP; entry order must match the
 * CPU_* ids above (slot 0 is unused).
 */
55 #define CTL_MACHDEP_NAMES { \
56 { 0, 0 }, \
57 { "booted_kernel", CTLTYPE_STRING }, \
58 { "booted_device", CTLTYPE_STRING }, \
59 { "boot_args", CTLTYPE_STRING }, \
60 { "cpu_arch", CTLTYPE_INT }, \
61 }
62
63 #ifdef _KERNEL
64 /*
65 * Exported definitions unique to SPARC cpu support.
66 */
67
68 #if defined(_KERNEL_OPT)
69 #include "opt_multiprocessor.h"
70 #include "opt_lockdebug.h"
71 #endif
72
73 #include <machine/psl.h>
74 #include <machine/reg.h>
75 #include <machine/intr.h>
76 #include <sparc64/sparc64/intreg.h>
77
78 #include <sys/sched.h>
79 /*
80 * The cpu_info structure is part of a 64KB structure that is mapped both
81 * into the kernel pmap and by a single locked TTE at CPUINFO_VA for that
82 * particular processor. Each processor's cpu_info is accessible at
83 * CPUINFO_VA only on that processor; other processors can access it through
84 * the additional mapping in the kernel pmap.
85 *
86 * The 64KB page contains:
87 *
88 * cpu_info
89 * interrupt stack (all remaining space)
90 * idle PCB
91 * idle stack (STACKSPACE - sizeof(PCB))
92 * 32KB TSB
93 */
94
/*
 * Per-CPU state.  Field order matters: the scheduling hot-path fields
 * are kept first.  Do not reorder -- assembly code may rely on offsets.
 */
95 struct cpu_info {
96 /* Most important fields first */
97 struct lwp *ci_curlwp; /* current LWP on this CPU */
98 struct pcb *ci_cpcb; /* current PCB */
99 struct cpu_info *ci_next; /* next cpu_info on the global `cpus' list */
100
101 struct lwp *ci_fplwp; /* LWP owning this CPU's FPU state */
102 int ci_number; /* logical cpu number (see cpu_number()) */
103 int ci_upaid; /* hardware cpu id -- presumably the UPA id; confirm in cpu.c */
104 int ci_cpuid; /* cpu id */
105 struct schedstate_percpu ci_schedstate; /* MI per-CPU scheduler state */
106
107 /*
108 * Variables used by cc_microtime().
109 */
110 struct timeval ci_cc_time; /* timestamp at last cc_microset() -- presumed; verify in cc_microtime code */
111 int64_t ci_cc_cc; /* cycle counter at last cc_microset() */
112 int64_t ci_cc_ms_delta; /* cycle delta per interval */
113 int64_t ci_cc_denom; /* scaling denominator */
114
115 /* DEBUG/DIAGNOSTIC stuff */
116 u_long ci_spin_locks; /* # of spin locks held (LOCKDEBUG) */
117 u_long ci_simple_locks; /* # of simple locks held (LOCKDEBUG) */
118
119 /* Spinning up the CPU */
120 void (*ci_spinup) __P((void)); /* entry point used when this CPU spins up */
121 void *ci_initstack; /* initial stack for this CPU */
122 paddr_t ci_paddr; /* physical address of this cpu_info page */
123 };
124
125 extern struct cpu_info *cpus;
126 extern struct cpu_info cpu_info_store;
127
/* XXX uniprocessor shortcut; the disabled arm uses the per-CPU CPUINFO_VA mapping. */
128 #if 1
129 #define curcpu() (&cpu_info_store)
130 #else
131 #define curcpu() ((struct cpu_info *)CPUINFO_VA)
132 #endif
133
134 /*
135 * definitions of cpu-dependent requirements
136 * referenced in generic code
137 */
138 #define cpu_swapin(p) /* nothing */
139 #define cpu_swapout(p) /* nothing */
140 #define cpu_wait(p) /* nothing */
/* XXX hardwired to 0 for uniprocessor; MP arm reads ci_number from curcpu(). */
141 #if 1
142 #define cpu_number() 0
143 #else
144 #define cpu_number() (curcpu()->ci_number)
145 #endif
146
147 /* This really should be somewhere else. */
148 #define cpu_proc_fork(p1, p2) /* nothing */
149
150 #if defined(MULTIPROCESSOR)
151 void cpu_boot_secondary_processors __P((void));
152 #define CPU_IS_PRIMARY(ci) (1) /* XXX */
153 #endif
154
155 /*
156 * definitions for MI microtime().
157 */
158 extern struct timeval cc_microset_time; /* shared timebase for cc_microset() */
159 #define microtime(tv) cc_microtime(tv) /* MI microtime() maps to the cycle-counter version */
160 void cc_microtime __P((struct timeval *));
161 void cc_microset __P((struct cpu_info *));
162
163 extern uint64_t cpu_clockrate[]; /* cpu clock rate(s); element meanings defined in clock code, not visible here */
164
165 /*
166 * Arguments to hardclock, softclock and gatherstats encapsulate the
167 * previous machine state in an opaque clockframe. The ipl is here
168 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
169 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
170 */
171 extern int intstack[]; /* interrupt stack bounds, used by CLKF_INTR() below */
172 extern int eintstack[];
173 struct clockframe {
174 struct trapframe64 t; /* trap state saved when the clock interrupt fired */
175 };
176
/* User mode iff the saved %tstate has the privileged bit clear. */
177 #define CLKF_USERMODE(framep) (((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
178 /*
179 * XXX Disable CLKF_BASEPRI() for now. If we use a counter-timer for
180 * the clock, the interrupt remains blocked until the interrupt handler
181 * returns and we write to the clear interrupt register. If we use
182 * %tick for the clock, we could get multiple interrupts, but the
183 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
184 * posted twice anyway.
185 *
186 * Switching to %tick for all machines and disabling INTR_INTERLOCK
187 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
188 */
189 #if 0
190 #define CLKF_BASEPRI(framep) (((framep)->t.tf_oldpil) == 0)
191 #else
192 #define CLKF_BASEPRI(framep) (0)
193 #endif
194 #define CLKF_PC(framep) ((framep)->t.tf_pc)
195 /* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
/*
 * CLKF_INTR: true when the interrupted (kernel) context was running on
 * the interrupt stack.  tf_out[6] is the saved stack pointer; if its
 * low bit is set the frame is a 64-bit one whose %sp carries the 2047
 * (0x7ff) stack bias, so the bias is subtracted from the stack bounds
 * before the comparison.
 */
196 #define CLKF_INTR(framep) \
197 ((!CLKF_USERMODE(framep))&& \
198 (((framep)->t.tf_out[6] & 1 ) ? \
199 (((vaddr_t)(framep)->t.tf_out[6] < \
200 (vaddr_t)EINTSTACK-0x7ff) && \
201 ((vaddr_t)(framep)->t.tf_out[6] > \
202 (vaddr_t)INTSTACK-0x7ff)) : \
203 (((vaddr_t)(framep)->t.tf_out[6] < \
204 (vaddr_t)EINTSTACK) && \
205 ((vaddr_t)(framep)->t.tf_out[6] > \
206 (vaddr_t)INTSTACK))))
207
208 /*
209 * Software interrupt request `register'.
210 */
211 #ifdef DEPRECATED
/* Legacy soft-interrupt request flags; compiled out unless DEPRECATED is defined. */
212 union sir {
213 int sir_any; /* all four flags viewed as one word */
214 char sir_which[4]; /* one flag byte per soft interrupt source */
215 } sir;
216
217 #define SIR_NET 0 /* index into sir_which */
218 #define SIR_CLOCK 1 /* index into sir_which */
219 #endif
220
221 extern struct intrhand soft01intr, soft01net, soft01clock; /* canned soft-interrupt handlers */
222
/* Disabled arm would post the soft interrupt directly via send_softint(). */
223 #if 0
224 #define setsoftint() send_softint(-1, IPL_SOFTINT, &soft01intr)
225 #define setsoftnet() send_softint(-1, IPL_SOFTNET, &soft01net)
226 #else
227 void setsoftint __P((void));
228 void setsoftnet __P((void));
229 #endif
230
231 int want_ast; /* AST requested; XXX tentative definition in a header (common symbol) */
232
233 /*
234 * Preempt the current process if in interrupt from user mode,
235 * or after the current trap/syscall if in system mode.
236 */
237 int want_resched; /* resched() was called */
238 #define need_resched(ci) (want_resched = 1, want_ast = 1) /* note: `ci' is ignored (uniprocessor assumption) */
239
240 /*
241 * Give a profiling tick to the current process when the user profiling
242 * buffer pages are invalid. On the sparc, request an ast to send us
243 * through trap(), marking the proc as needing a profiling tick.
244 */
245 #define need_proftick(p) ((p)->p_flag |= P_OWEUPC, want_ast = 1)
246
247 /*
248 * Notify the current process (p) that it has a signal pending,
249 * process as soon as possible.
250 */
251 #define signotify(p) (want_ast = 1) /* `p' unused; the AST check happens on trap return */
252
253 /*
254 * Only one process may own the FPU state.
255 *
256 * XXX this must be per-cpu (eventually)
257 */
258 struct lwp *fplwp; /* FPU owner; XXX tentative definition in a header (common symbol) */
259
260 /*
261 * Interrupt handler chains. Interrupt handlers should return 0 for
262 * ``not me'' or 1 (``I took care of it''). intr_establish() inserts a
263 * handler into the list. The handler is called with its (single)
264 * argument, or with a pointer to a clockframe if ih_arg is NULL.
265 */
266 struct intrhand {
267 int (*ih_fun) __P((void *)); /* handler: returns 0 = not mine, 1 = handled */
268 void *ih_arg; /* handler argument; if NULL a clockframe pointer is passed */
269 short ih_number; /* interrupt number */
270 /* the H/W provides */
271 char ih_pil; /* interrupt priority */
272 struct intrhand *ih_next; /* global list */
273 struct intrhand *ih_pending; /* interrupt queued */
274 volatile u_int64_t *ih_map; /* Interrupt map reg */
275 volatile u_int64_t *ih_clr; /* clear interrupt reg */
276 };
277 extern struct intrhand *intrhand[]; /* handler chains -- presumably one per level; see intr_establish() */
278 extern struct intrhand *intrlev[MAXINTNUM]; /* handlers indexed by interrupt number */
279
280 void intr_establish __P((int level, struct intrhand *));
281
282 /* cpu.c */
283 paddr_t cpu_alloc __P((void));
284 u_int64_t cpu_init __P((paddr_t, int));
285 /* disksubr.c */
286 struct dkbad;
287 int isbad __P((struct dkbad *bt, int, int, int));
288 /* machdep.c */
289 int ldcontrolb __P((caddr_t));
290 void dumpconf __P((void));
291 caddr_t reserve_dumppages __P((caddr_t));
292 /* clock.c */
293 struct timeval;
294 int tickintr __P((void *)); /* level 10 (tick) interrupt code */
295 int clockintr __P((void *));/* level 10 (clock) interrupt code */
296 int statintr __P((void *)); /* level 14 (statclock) interrupt code */
297 /* locore.s */
298 struct fpstate64;
299 void savefpstate __P((struct fpstate64 *));
300 void loadfpstate __P((struct fpstate64 *));
301 u_int64_t probeget __P((paddr_t, int, int));
302 int probeset __P((paddr_t, int, int, u_int64_t));
303 #if 0
304 void write_all_windows __P((void));
305 void write_user_windows __P((void));
306 #else
/* V9 `flushw' spills every in-use register window, so both macros are the same op. */
307 #define write_all_windows() __asm __volatile("flushw" : : )
308 #define write_user_windows() __asm __volatile("flushw" : : )
309 #endif
310 void proc_trampoline __P((void));
311 struct pcb;
312 void snapshot __P((struct pcb *));
313 struct frame *getfp __P((void));
314 int xldcontrolb __P((caddr_t, struct pcb *));
315 void copywords __P((const void *, void *, size_t));
316 void qcopy __P((const void *, void *, size_t));
317 void qzero __P((void *, size_t));
318 void switchtoctx __P((int));
319 /* locore2.c */
320 void remrq __P((struct proc *));
321 /* trap.c */
322 void kill_user_windows __P((struct lwp *));
323 int rwindow_save __P((struct lwp *));
324 /* amd7930intr.s */
325 void amd7930_trap __P((void));
326 /* cons.c */
327 int cnrom __P((void));
328 /* zs.c */
329 void zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
330 #ifdef KGDB
331 void zs_kgdb_init __P((void));
332 #endif
333 /* fb.c */
334 void fb_unblank __P((void));
335 /* kgdb_stub.c */
336 #ifdef KGDB
337 void kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
338 void kgdb_connect __P((int));
339 void kgdb_panic __P((void));
340 #endif
341 /* emul.c */
342 int fixalign __P((struct lwp *, struct trapframe64 *));
343 int emulinstr __P((vaddr_t, struct trapframe64 *));
344
345 /*
346 *
347 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
348 * of the trap vector table. The next eight bits are supplied by the
349 * hardware when the trap occurs, and the bottom four bits are always
350 * zero (so that we can shove up to 16 bytes of executable code---exactly
351 * four instructions---into each trap vector).
352 *
353 * NOTE(review): the paragraph above describes the 32-bit (V8) layout;
354 * struct trapvec below holds eight instructions (32 bytes) per vector,
355 * matching the V9 trap table -- confirm against the V9 manual.
356 *
357 * The hardware allocates half the trap vectors to hardware and half to
358 * software.
359 *
360 * Traps have priorities assigned (lower number => higher priority).
361 */
358
359 struct trapvec {
360 int tv_instr[8]; /* the eight instructions */
361 };
362 extern struct trapvec *trapbase; /* the 256 vectors */
363
/* Word-oriented zero/copy helpers -- presumably implemented in locore; confirm there. */
364 extern void wzero __P((void *, u_int));
365 extern void wcopy __P((const void *, void *, u_int));
366
367 #endif /* _KERNEL */
368 #endif /* _CPU_H_ */
369