/*	$NetBSD: cpu.h,v 1.29.6.5 2002/06/20 03:41:23 nathanw Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_MAXID		2	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "booted_kernel", CTLTYPE_STRING }, \
}
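
/*
 * Example (illustrative sketch only, not part of this header): a userland
 * program can query machdep.booted_kernel with sysctl(3) using the
 * CTL_MACHDEP/CPU_BOOTED_KERNEL mib defined above.  The buffer size and
 * function name below are arbitrary choices for the example.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <machine/cpu.h>
 *	#include <stdio.h>
 *
 *	int
 *	print_booted_kernel(void)
 *	{
 *		int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *		char buf[MAXPATHLEN];
 *		size_t len = sizeof(buf);
 *
 *		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *			return (-1);
 *		printf("booted kernel: %s\n", buf);
 *		return (0);
 *	}
 */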

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if !defined(_LKM)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/sched.h>
/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and, by a single locked TTE, at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only from that processor; other processors can access it
 * through the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	/* Most important fields first */
	struct proc		*ci_curproc;
	struct pcb		*ci_cpcb;	/* also initial stack */
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;
	int			ci_number;
	int			ci_upaid;
	struct schedstate_percpu ci_schedstate;	/* scheduler state */

	/* DEBUG/DIAGNOSTIC stuff */
	u_long			ci_spin_locks;	/* # of spin locks held */
	u_long			ci_simple_locks; /* # of simple locks held */

	/* Spinning up the CPU */
	void			(*ci_spinup) __P((void)); /* spinup routine */
	void			*ci_initstack;
	paddr_t			ci_paddr;	/* Phys addr of this structure. */
};

extern struct cpu_info *cpus;
extern struct cpu_info cpu_info_store;

#if 1
#define	curcpu()	(&cpu_info_store)
#else
#define	curcpu()	((struct cpu_info *)CPUINFO_VA)
#endif
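
/*
 * Example (illustrative sketch only): kernel code reaches its per-CPU
 * data through curcpu().  On a true multiprocessor configuration the
 * macro would resolve to the locked CPUINFO_VA mapping described above;
 * the current uniprocessor definition simply returns &cpu_info_store.
 * The function name below is hypothetical.
 *
 *	static __inline int
 *	example_cpu_number(void)
 *	{
 *		struct cpu_info *ci = curcpu();
 *
 *		return (ci->ci_number);
 *	}
 */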

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)		/* nothing */
#define	cpu_swapout(p)		/* nothing */
#define	cpu_wait(p)		/* nothing */
#if 1
#define	cpu_number()		0
#else
#define	cpu_number()		(curcpu()->ci_number)
#endif

/* This really should be somewhere else. */
#define	cpu_proc_fork(p1, p2)	/* nothing */

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];
extern int eintstack[];
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK - 0x7ff) &&		\
			 ((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK - 0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			 ((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
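
/*
 * Example (illustrative sketch only): a clock interrupt handler receives
 * a clockframe and uses the CLKF_* macros above to classify the
 * interrupted context, e.g. to charge the tick to user, interrupt, or
 * system time.  The handler name and counters below are hypothetical.
 *
 *	int
 *	example_clock_handler(void *arg)
 *	{
 *		struct clockframe *frame = arg;
 *
 *		if (CLKF_USERMODE(frame))
 *			example_user_ticks++;
 *		else if (CLKF_INTR(frame))
 *			example_intr_ticks++;
 *		else
 *			example_sys_ticks++;
 *		return (1);
 *	}
 */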

/*
 * Software interrupt request `register'.
 */
#ifdef DEPRECATED
union sir {
	int	sir_any;
	char	sir_which[4];
} sir;

#define	SIR_NET		0
#define	SIR_CLOCK	1
#endif

extern struct intrhand soft01intr, soft01net, soft01clock;

#if 0
#define	setsoftint()	send_softint(-1, IPL_SOFTINT, &soft01intr)
#define	setsoftnet()	send_softint(-1, IPL_SOFTNET, &soft01net)
#else
void setsoftint __P((void));
void setsoftnet __P((void));
#endif
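
/*
 * Example (illustrative sketch only): a hardware interrupt handler that
 * wants the bulk of its work done at a software interrupt level queues
 * the work and then requests the soft interrupt with setsoftnet().  The
 * softc, queue routine, and handler names below are hypothetical.
 *
 *	int
 *	example_rx_intr(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		example_enqueue(sc);
 *		setsoftnet();
 *		return (1);
 *	}
 */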

int	want_ast;

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
int	want_resched;		/* resched() was called */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	signotify(p)		(want_ast = 1)
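
/*
 * Example (illustrative sketch only): need_resched(), need_proftick() and
 * signotify() all work by setting want_ast so that the return-to-user
 * path takes an AST.  Conceptually the AST code then does something like
 * the following; this is deliberately simplified, the real work happens
 * in trap.c and locore.s, and example_reschedule() is hypothetical.
 *
 *	void
 *	example_ast(struct lwp *l)
 *	{
 *		want_ast = 0;
 *		if (want_resched) {
 *			want_resched = 0;
 *			example_reschedule(l);
 *		}
 *	}
 */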

/*
 * Only one process may own the FPU state.
 *
 * XXX this must be per-cpu (eventually)
 */
struct lwp *fplwp;		/* FPU owner */
int	foundfpu;		/* true => we have an FPU */
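
/*
 * Example (illustrative sketch only): code that is about to discard or
 * reuse an lwp's floating point context first checks whether that lwp
 * currently owns the FPU and, if so, saves the state with savefpstate()
 * (declared below) before clearing ownership.  The function name is
 * hypothetical, and this ignores the per-cpu issue noted above.
 *
 *	void
 *	example_save_fpu(struct lwp *l, struct fpstate64 *fs)
 *	{
 *		if (l == fplwp) {
 *			savefpstate(fs);
 *			fplwp = NULL;
 *		}
 *	}
 */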

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int			(*ih_fun) __P((void *));
	void			*ih_arg;
	short			ih_number;	/* interrupt number */
						/* the H/W provides */
	char			ih_pil;		/* interrupt priority */
	struct intrhand		*ih_next;	/* global list */
	struct intrhand		*ih_pending;	/* interrupt queued */
	volatile u_int64_t	*ih_map;	/* Interrupt map reg */
	volatile u_int64_t	*ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish __P((int level, struct intrhand *));
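
/*
 * Example (illustrative sketch only): a driver hooks up its interrupt
 * handler by filling in a struct intrhand and handing it to
 * intr_establish() at the desired priority level.  The handler returns
 * 1 when it serviced the interrupt and 0 otherwise, as described above.
 * The driver names below are hypothetical.
 *
 *	static struct intrhand example_ih;
 *
 *	void
 *	example_attach(struct example_softc *sc, int pil, int ino)
 *	{
 *		example_ih.ih_fun = example_rx_intr;
 *		example_ih.ih_arg = sc;
 *		example_ih.ih_number = ino;
 *		example_ih.ih_pil = pil;
 *		intr_establish(pil, &example_ih);
 *	}
 */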

/* cpu.c */
paddr_t	cpu_alloc __P((void));
u_int64_t cpu_init __P((paddr_t, int));
/* disksubr.c */
struct dkbad;
int	isbad __P((struct dkbad *bt, int, int, int));
/* machdep.c */
int	ldcontrolb __P((caddr_t));
void	dumpconf __P((void));
caddr_t	reserve_dumppages __P((caddr_t));
/* clock.c */
struct timeval;
int	tickintr __P((void *));		/* level 10 (tick) interrupt code */
int	clockintr __P((void *));	/* level 10 (clock) interrupt code */
int	statintr __P((void *));		/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate __P((struct fpstate64 *));
void	loadfpstate __P((struct fpstate64 *));
u_int64_t probeget __P((paddr_t, int, int));
int	probeset __P((paddr_t, int, int, u_int64_t));
#if 0
void	write_all_windows __P((void));
void	write_user_windows __P((void));
#else
#define	write_all_windows()	__asm __volatile("flushw" : : )
#define	write_user_windows()	__asm __volatile("flushw" : : )
#endif
void	proc_trampoline __P((void));
struct pcb;
void	snapshot __P((struct pcb *));
struct frame *getfp __P((void));
int	xldcontrolb __P((caddr_t, struct pcb *));
void	copywords __P((const void *, void *, size_t));
void	qcopy __P((const void *, void *, size_t));
void	qzero __P((void *, size_t));
void	switchtoctx __P((int));
/* locore2.c */
void	remrq __P((struct proc *));
/* trap.c */
void	kill_user_windows __P((struct lwp *));
int	rwindow_save __P((struct lwp *));
/* amd7930intr.s */
void	amd7930_trap __P((void));
/* cons.c */
int	cnrom __P((void));
/* zs.c */
void	zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
#ifdef KGDB
void	zs_kgdb_init __P((void));
#endif
/* fb.c */
void	fb_unblank __P((void));
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void	kgdb_connect __P((int));
void	kgdb_panic __P((void));
#endif
/* emul.c */
int	fixalign __P((struct lwp *, struct trapframe64 *));
int	emulinstr __P((vaddr_t, struct trapframe64 *));

/*
 * The CPU has a Trap Base Address (TBA) register which holds the upper
 * bits of the address of the trap vector table.  The trap type is
 * supplied by the hardware when the trap occurs, and the low-order bits
 * of each vector's address are always zero, so that a small amount of
 * executable code can be shoved into each trap vector---on the
 * UltraSPARC, 32 bytes, or exactly eight instructions.
 *
 * The hardware allocates half the trap vectors to hardware traps and
 * half to software traps.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[8];		/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the 256 vectors */
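
/*
 * Example (illustrative sketch only): given the layout above, the vector
 * for a particular trap type can be located by indexing trapbase, e.g.
 * to inspect the instructions installed for trap type `tt'.  The
 * function name is hypothetical.
 *
 *	static __inline int
 *	example_first_trap_instr(int tt)
 *	{
 *		return (trapbase[tt].tv_instr[0]);
 *	}
 */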

extern void	wzero __P((void *, u_int));
extern void	wcopy __P((const void *, void *, u_int));

#endif /* _KERNEL */
#endif /* _CPU_H_ */