/*	$NetBSD: cpu.h,v 1.41 2004/01/04 11:33:31 jdolecek Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "booted_kernel", CTLTYPE_STRING }, \
	{ "booted_device", CTLTYPE_STRING }, \
	{ "boot_args", CTLTYPE_STRING }, \
	{ "cpu_arch", CTLTYPE_INT }, \
}
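
#if 0
/*
 * Illustrative userland sketch (not compiled here): reading one of the
 * CTL_MACHDEP nodes defined above with sysctl(3).  Error handling is
 * minimal and the program itself is hypothetical; only the mib names
 * come from this header and <sys/sysctl.h>.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
	char buf[MAXPATHLEN];
	size_t len = sizeof(buf);

	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return 1;
	printf("booted kernel: %s\n", buf);
	return 0;
}
#endif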

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/sched.h>
/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and by a single locked TTE at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors can access it
 * through an additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	/* Most important fields first */
	struct lwp	*ci_curlwp;		/* currently running LWP */
	struct pcb	*ci_cpcb;		/* current PCB */
	struct cpu_info	*ci_next;		/* next cpu_info in the list */

	struct lwp	*ci_fplwp;		/* LWP owning the FPU state */
	int		ci_number;		/* logical CPU number */
	int		ci_upaid;		/* UPA port id */
	int		ci_cpuid;
	struct schedstate_percpu ci_schedstate;	/* scheduler state */

	/*
	 * Variables used by cc_microtime().
	 */
	struct timeval	ci_cc_time;
	int64_t		ci_cc_cc;
	int64_t		ci_cc_ms_delta;
	int64_t		ci_cc_denom;

	/* DEBUG/DIAGNOSTIC stuff */
	u_long		ci_spin_locks;		/* # of spin locks held */
	u_long		ci_simple_locks;	/* # of simple locks held */

	/* Spinning up the CPU */
	void		(*ci_spinup) __P((void));	/* spinup routine */
	void		*ci_initstack;		/* initial stack pointer */
	paddr_t		ci_paddr;		/* physical address of this structure */
};

extern struct cpu_info *cpus;

#define	curcpu()	((struct cpu_info *)CPUINFO_VA)

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb
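
#if 0
/*
 * Illustrative sketch (not compiled): the local CPU's cpu_info is always
 * reachable at the fixed CPUINFO_VA mapping via curcpu(); every CPU's
 * cpu_info can also be reached through its kernel-pmap mapping by walking
 * the `cpus' list.  The function name is hypothetical.
 */
static int
example_count_cpus(void)
{
	struct cpu_info *ci;
	int n = 0;

	/* curcpu() always refers to the processor executing this code. */
	ci = curcpu();
	(void)ci->ci_number;

	/* Walk every cpu_info through its kernel-pmap mapping. */
	for (ci = cpus; ci != NULL; ci = ci->ci_next)
		n++;
	return (n);
}
#endif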

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)		/* nothing */
#define	cpu_swapout(p)		/* nothing */
#if 1
#define	cpu_number()		0
#else
#define	cpu_number()		(curcpu()->ci_number)
#endif

/* This really should be somewhere else. */
#define	cpu_proc_fork(p1, p2)	/* nothing */

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors __P((void));
#define	CPU_IS_PRIMARY(ci)	(1)	/* XXX */
#else
#define	CPU_IS_PRIMARY(ci)	(1)
#endif

/*
 * definitions for MI microtime().
 */
extern struct timeval cc_microset_time;
#define	microtime(tv)	cc_microtime(tv)
void	cc_microtime __P((struct timeval *));
void	cc_microset __P((struct cpu_info *));

extern uint64_t cpu_clockrate[];

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];
extern int eintstack[];
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
	    (((framep)->t.tf_out[6] & 1) ?				\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK - 0x7ff) &&			\
		    ((vaddr_t)(framep)->t.tf_out[6] >			\
		    (vaddr_t)INTSTACK - 0x7ff)) :			\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK) &&				\
		    ((vaddr_t)(framep)->t.tf_out[6] >			\
		    (vaddr_t)INTSTACK))))
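
#if 0
/*
 * Illustrative sketch (not compiled): how MI clock code typically
 * consumes a clockframe through the CLKF_* macros above.  The real
 * consumers are hardclock() and statclock() in sys/kern; the function
 * name here is hypothetical.
 */
static void
example_clock_tick(struct clockframe *cf)
{
	vaddr_t pc = CLKF_PC(cf);	/* PC at the time of the interrupt */

	if (CLKF_USERMODE(cf)) {
		/* the tick is charged to user time */
	} else if (CLKF_INTR(cf)) {
		/* taken on the interrupt stack: charged as interrupt time */
	} else {
		/* charged to system time */
	}
	(void)pc;
}
#endif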

/*
 * Software interrupt request `register'.
 */
#ifdef DEPRECATED
union sir {
	int	sir_any;
	char	sir_which[4];
} sir;

#define	SIR_NET		0
#define	SIR_CLOCK	1
#endif

extern struct intrhand soft01intr, soft01net, soft01clock;

#if 0
#define	setsoftint()	send_softint(-1, IPL_SOFTINT, &soft01intr)
#define	setsoftnet()	send_softint(-1, IPL_SOFTNET, &soft01net)
#else
void	setsoftint __P((void));
void	setsoftnet __P((void));
#endif

int	want_ast;

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
int	want_resched;		/* resched() was called */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	signotify(p)		(want_ast = 1)

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun) __P((void *));
	void	*ih_arg;
	short	ih_number;		/* interrupt number */
					/* the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct intrhand *ih_next;	/* global list */
	struct intrhand *ih_pending;	/* interrupt queued */
	volatile u_int64_t *ih_map;	/* Interrupt map reg */
	volatile u_int64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish __P((int level, struct intrhand *));
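
#if 0
/*
 * Illustrative sketch (not compiled): hooking a hardware interrupt with
 * intr_establish().  As described above, the handler returns 1 if it
 * serviced the interrupt and 0 otherwise.  The function names, the
 * interrupt number `ino' and the priority level are hypothetical;
 * IPL_BIO is assumed to come from <machine/intr.h>.
 */
static int
example_hardintr(void *arg)
{
	/* check and acknowledge the device referenced by `arg' here */
	return (1);			/* 1: handled, 0: not mine */
}

static struct intrhand example_ih;

static void
example_establish(void *sc, int ino)
{
	example_ih.ih_fun = example_hardintr;
	example_ih.ih_arg = sc;
	example_ih.ih_number = ino;	/* H/W interrupt number */
	example_ih.ih_pil = IPL_BIO;	/* assumed priority level */
	intr_establish(example_ih.ih_pil, &example_ih);
}
#endif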

/* cpu.c */
paddr_t	cpu_alloc __P((void));
u_int64_t cpu_init __P((paddr_t, int));
/* disksubr.c */
struct dkbad;
int	isbad __P((struct dkbad *bt, int, int, int));
/* machdep.c */
int	ldcontrolb __P((caddr_t));
void	dumpconf __P((void));
caddr_t	reserve_dumppages __P((caddr_t));
/* clock.c */
struct timeval;
int	tickintr __P((void *));		/* level 10 (tick) interrupt code */
int	clockintr __P((void *));	/* level 10 (clock) interrupt code */
int	statintr __P((void *));		/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate __P((struct fpstate64 *));
void	loadfpstate __P((struct fpstate64 *));
u_int64_t probeget __P((paddr_t, int, int));
int	probeset __P((paddr_t, int, int, u_int64_t));
#if 0
void	write_all_windows __P((void));
void	write_user_windows __P((void));
#else
#define	write_all_windows()	__asm __volatile("flushw" : : )
#define	write_user_windows()	__asm __volatile("flushw" : : )
#endif
void	proc_trampoline __P((void));
struct pcb;
void	snapshot __P((struct pcb *));
struct frame *getfp __P((void));
int	xldcontrolb __P((caddr_t, struct pcb *));
void	copywords __P((const void *, void *, size_t));
void	qcopy __P((const void *, void *, size_t));
void	qzero __P((void *, size_t));
void	switchtoctx __P((int));
/* locore2.c */
void	remrq __P((struct proc *));
/* trap.c */
void	kill_user_windows __P((struct lwp *));
int	rwindow_save __P((struct lwp *));
/* amd7930intr.s */
void	amd7930_trap __P((void));
/* cons.c */
int	cnrom __P((void));
/* zs.c */
void	zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
#ifdef KGDB
void	zs_kgdb_init __P((void));
#endif
/* fb.c */
void	fb_unblank __P((void));
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void	kgdb_connect __P((int));
void	kgdb_panic __P((void));
#endif
/* emul.c */
int	fixalign __P((struct lwp *, struct trapframe64 *));
int	emulinstr __P((vaddr_t, struct trapframe64 *));

/*
 * The SPARC has a Trap Base Address (TBA) register which holds the upper
 * bits of the address of the trap vector table.  The middle bits are
 * supplied by the hardware from the trap type when the trap occurs, and
 * the bottom five bits are always zero (so that we can shove up to
 * 32 bytes of executable code---exactly eight instructions---into each
 * trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[8];		/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the 256 vectors */
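
#if 0
/*
 * Illustrative sketch (not compiled): the vector for trap type `tt'
 * lives at trapbase[tt], so replacing it amounts to copying a new set
 * of eight instructions into that slot.  The function and argument
 * names are hypothetical, and a real implementation would also have
 * to flush the instruction cache afterwards.
 */
static void
example_install_vector(int tt, const struct trapvec *tv)
{
	trapbase[tt] = *tv;	/* copy the eight instructions */
	/* an I-cache flush would be required here on real hardware */
}
#endif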

extern void wzero __P((void *, u_int));
extern void wcopy __P((const void *, void *, u_int));

#endif /* _KERNEL */
#endif /* _CPU_H_ */