/*	$NetBSD: cpu.h,v 1.80 2008/04/29 14:06:31 ad Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/cpu_data.h>
#include <sys/evcnt.h>
/*
 * The cpu_info structure is part of a 64KB structure mapped both by the
 * kernel pmap and by a single locked TTE at CPUINFO_VA for that particular
 * processor.  Each processor's cpu_info is accessible at CPUINFO_VA only
 * on that processor itself; other processors reach it through the
 * additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;
#ifdef MULTIPROCESSOR
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];
#endif

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

	/*
	 * A context is simply a small number that differentiates multiple
	 * mappings of the same address.  Contexts on the spitfire are
	 * 13 bits, but could be as large as 17 bits.
	 *
	 * Each context is either free or attached to a pmap.
	 *
	 * The context table is an array of pointers to psegs.  Just
	 * dereference the right pointer and you get to the pmap segment
	 * tables.  These are physical addresses, of course.
	 */
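
	/*
	 * Example (illustrative sketch only, not compiled): once a pmap
	 * has been assigned a context number "ctx" on this cpu, the
	 * physical address of its segment table is just
	 *
	 *	paddr_t psegp = curcpu()->ci_ctxbusy[ctx];
	 *
	 * Where the pmap code keeps "ctx" for each cpu is not described
	 * by this header.
	 */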
	int			ci_pmap_next_ctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;
	int			ci_numctx;

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus).  These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	struct cpu_data		ci_data;	/* MI per-cpu data */

	volatile void		*ci_ddb_regs;	/* DDB regs */
};

#define	CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;
};

extern struct cpu_bootargs *cpu_args;

extern int sparc_ncpus;
extern struct cpu_info *cpus;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR	int
#define	CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
					ci = ci->ci_next
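
/*
 * Example (illustrative sketch only, not compiled): visiting every
 * attached cpu with the iterator above, as an MI caller would:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (CPU_IS_PRIMARY(ci))
 *			continue;
 *		... do something to the secondary cpu described by ci ...
 *	}
 *
 * Note that curcpu() goes through the CPUINFO_VA alias but returns the
 * globally valid ci_self pointer, so the value it yields is meaningful
 * on every cpu.
 */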

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)	/* nothing */
#define	cpu_swapout(p)	/* nothing */
#define	cpu_wait(p)	/* nothing */
void	cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void	cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void	cpu_pmap_prepare(struct cpu_info *, bool);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;
extern u_long mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just this cpu
 */
typedef void (* ipifunc_t)(void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
void	sparc64_send_ipi(int, ipifunc_t, uint64_t, uint64_t);
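
/*
 * Example (illustrative sketch only, not compiled): asking all other
 * cpus to run a handler.  "example_ipi_handler" is hypothetical and not
 * declared anywhere; the two uint64_t values are delivered together
 * with the IPI.
 *
 *	void example_ipi_handler(void *);
 *
 *	sparc64_broadcast_ipi(example_ipi_handler, 0, 0);
 *
 * Use sparc64_multicast_ipi() with a sparc64_cpuset_t to reach only a
 * subset of the cpus, or sparc64_send_ipi() to reach a single target.
 */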
#endif

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
	    (((framep)->t.tf_out[6] & 1) ?				\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK - 0x7ff) &&			\
		    ((vaddr_t)(framep)->t.tf_out[6] >			\
			(vaddr_t)INTSTACK - 0x7ff)) :			\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK) &&				\
		    ((vaddr_t)(framep)->t.tf_out[6] >			\
			(vaddr_t)INTSTACK))))
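
/*
 * Example (illustrative sketch only, not compiled): how MI clock code
 * consumes the macros above when deciding where to charge a tick.
 * "example_statclock" is hypothetical; the real callers are hardclock()
 * and statclock() in sys/kern.
 *
 *	void
 *	example_statclock(struct clockframe *cf)
 *	{
 *		if (CLKF_USERMODE(cf))
 *			... charge user time to the current lwp ...
 *		else if (CLKF_INTR(cf))
 *			... charge interrupt time ...
 *		else
 *			... charge system time; CLKF_PC(cf) gives the
 *			    interrupted kernel pc for profiling ...
 *	}
 */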


extern struct intrhand soft01intr, soft01net, soft01clock;

void	setsoftint(void);
void	setsoftnet(void);

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
void	cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	/*
	 * If we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg.
	 */
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
	short	ih_number;		/* interrupt number */
					/* the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct intrhand *ih_next;	/* global list */
	struct intrhand *ih_pending;	/* interrupt queued */
	volatile uint64_t *ih_map;	/* Interrupt map reg */
	volatile uint64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
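
/*
 * Example (illustrative sketch only, not compiled): a driver wiring up
 * a hardware interrupt handler as described above.  The softc "sc", the
 * handler "example_hardintr" and the way the intrhand is allocated are
 * hypothetical; only the structure fields and intr_establish() come
 * from this header.
 *
 *	struct intrhand *ih = ... allocate and zero one ...;
 *
 *	ih->ih_fun = example_hardintr;	returns 0 for "not me", 1 if handled
 *	ih->ih_arg = sc;		passed back to example_hardintr()
 *	ih->ih_number = ino;		interrupt number from the bus
 *	ih->ih_pil = ipl;		priority level it should run at
 *	intr_establish(ipl, ih);
 */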

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void *	reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

void	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx(int);
/* trap.c */
void	kill_user_windows(struct lwp *);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#endif /* _KERNEL */
#endif /* _CPU_H_ */