/*	$NetBSD: cpu.h,v 1.95 2011/04/13 03:35:19 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/cpuset.h>
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>
/*
 * The cpu_info structure is part of a 64KB structure mapped both into the
 * kernel pmap and by a single locked TTE at CPUINFO_VA for that particular
 * processor.  Each processor's cpu_info is accessible at CPUINFO_VA only
 * for that processor.  Other processors can access it through the
 * additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	struct cpu_data		ci_data;	/* MI per-cpu data */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;

	/* This could be under MULTIPROCESSOR, but there's no good reason */
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;
	/*
	 * A context is simply a small number that differentiates multiple
	 * mappings of the same address.  Contexts on the spitfire are 13
	 * bits, but could be as large as 17 bits.
	 *
	 * Each context is either free or attached to a pmap.
	 *
	 * The context table is an array of pointers to psegs.  Just
	 * dereference the right pointer and you get to the pmap segment
	 * tables.  These are physical addresses, of course.
	 *
	 * ci_ctx_lock protects this CPU's context allocation/free.
	 * These are all allocated almost within the same cacheline.
	 * (An illustrative allocation sketch follows the structure
	 * definition below.)
	 */
	kmutex_t		ci_ctx_lock;
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus).  These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	volatile void		*ci_ddb_regs;	/* DDB regs */
};
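
/*
 * Illustrative sketch only (not the actual pmap.c allocator): handing out
 * a context number amounts to finding a zero slot in ci_ctxbusy[], storing
 * the physical address of the pmap's segment table there, and remembering
 * where to resume the search.  "segtable_pa" is a hypothetical name for
 * that physical address; context 0 belongs to the kernel so the search
 * wraps to 1, and the real allocator also has to steal an old context
 * once the table fills up:
 *
 *	mutex_enter(&ci->ci_ctx_lock);
 *	ctx = ci->ci_pmap_next_ctx;
 *	while (ci->ci_ctxbusy[ctx] != 0)
 *		if (++ctx == ci->ci_numctx)
 *			ctx = 1;
 *	ci->ci_ctxbusy[ctx] = segtable_pa;
 *	ci->ci_pmap_next_ctx = ctx + 1;
 *	mutex_exit(&ci->ci_ctx_lock);
 */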

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define	CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;
};

extern struct cpu_bootargs *cpu_args;
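
/*
 * Rough, hypothetical illustration of the intended use (the real code
 * lives in the MP bringup path, not in this header): the primary CPU
 * describes the kernel image and the new CPU's cpu_info page in *cpu_args,
 * starts the secondary through the PROM, and waits for the secondary's
 * hatch code to flip cb_flags.  "prom_startcpu" and the flag handshake
 * shown here are assumptions, not definitions from this file:
 *
 *	cpu_args->cb_node = ci->ci_node;
 *	cpu_args->cb_cpuinfo = ci->ci_paddr;
 *	cpu_args->cb_flags = 0;
 *	prom_startcpu(ci->ci_node, (void *)cpu_spinup_trampoline, 0);
 *	while (cpu_args->cb_flags == 0)
 *		;
 */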

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define	sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
					ci = ci->ci_next
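
/*
 * Typical use of the iterator above, e.g. from code that wants to visit
 * every attached CPU (a minimal sketch; the loop body is made up):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (CPU_IS_PRIMARY(ci))
 *			continue;
 *		... do something with ci ...
 *	}
 */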

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_wait(p)	/* nothing */
void	cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void	cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void	cpu_pmap_prepare(struct cpu_info *, bool);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char   *mp_tramp_code;
extern u_long  mp_tramp_code_len;
extern u_long  mp_tramp_tlb_slots;
extern u_long  mp_tramp_func;
extern u_long  mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send      - send to just this cpu
 * The called functions do not follow the C ABI, so they need to be coded
 * in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
void	sparc64_send_ipi(int, ipifunc_t, uint64_t, uint64_t);

/*
 * Call an arbitrary C function on another cpu (or all others but ourself)
 */
typedef void (*ipi_c_call_func_t)(void*);
void	sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
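
/*
 * Minimal sketch of running an ordinary C function on another CPU via
 * sparc64_generic_xcall(); "shoot_tlb_entry", "da" and the demap_args
 * structure are hypothetical names used only for illustration:
 *
 *	static void
 *	shoot_tlb_entry(void *arg)
 *	{
 *		struct demap_args *da = arg;
 *
 *		... invalidate the mapping described by da ...
 *	}
 *
 *	sparc64_generic_xcall(ci, shoot_tlb_entry, &da);
 */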

#endif

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK - 0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK - 0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
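
/*
 * Sketch of how an interrupt handler might classify a tick with the
 * macros above; "myclockintr" is a hypothetical handler name, not one of
 * the real handlers declared below:
 *
 *	int
 *	myclockintr(void *cap)
 *	{
 *		struct clockframe *frame = cap;
 *
 *		if (CLKF_USERMODE(frame))
 *			... charge the tick to user time ...
 *		else if (CLKF_INTR(frame))
 *			... we interrupted another interrupt handler ...
 *		else
 *			... charge the tick to system time ...
 *		return (1);
 *	}
 */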

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
void	cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	/* if we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg */
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
	short	ih_number;	/* interrupt number */
				/* the H/W provides */
	char	ih_pil;		/* interrupt priority */
	struct intrhand *ih_next;	/* global list */
	struct intrhand *ih_pending;	/* interrupt queued */
	volatile uint64_t *ih_map;	/* Interrupt map reg */
	volatile uint64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
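
/*
 * Sketch of how a driver might hook a hardware interrupt using the
 * declarations above; "mydev_intr", "sc", "ino" and "ipl" are hypothetical
 * names, and ih_fun must return 0 (``not me'') or 1 (``handled''):
 *
 *	static struct intrhand mydev_ih;
 *
 *	mydev_ih.ih_fun = mydev_intr;
 *	mydev_ih.ih_arg = sc;
 *	mydev_ih.ih_number = ino;
 *	mydev_ih.ih_pil = ipl;
 *	intr_establish(ipl, false, &mydev_ih);
 */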

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void	*reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx_us(int);
void	switchtoctx_usiii(int);
void	next_tick(long);
/* trap.c */
void	kill_user_windows(struct lwp *);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#else /* _KERNEL */

/*
 * XXX: provide some definitions for crash(8), probably can share
 */
#if defined(_KMEMUSER)
#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	curlwp		curcpu()->ci_curlwp
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */