/*	$NetBSD: cpu.h,v 1.97 2016/12/10 10:41:07 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */
42
43 #ifndef _CPU_H_
44 #define _CPU_H_
45
/*
 * CTL_MACHDEP definitions: identifiers for the machdep.* sysctl nodes.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */
54
55 /*
56 * Exported definitions unique to SPARC cpu support.
57 */
58
59 /* Things needed by crash or the kernel */
60 #if defined(_KERNEL) || defined(_KMEMUSER)
61
62 #if defined(_KERNEL_OPT)
63 #include "opt_multiprocessor.h"
64 #include "opt_lockdebug.h"
65 #include "opt_sparc_arch.h"
66 #endif
67
68 #include <sys/cpu_data.h>
69 #include <sys/evcnt.h>
70
71 #include <machine/intr.h>
72 #include <machine/psl.h>
73
74 #if defined(_KERNEL)
75 #include <sparc/sparc/cpuvar.h>
76 #include <sparc/sparc/intreg.h>
77 #else
78 #include <arch/sparc/sparc/vaddrs.h>
79 #include <arch/sparc/sparc/cache.h>
80 #endif
81
82 struct trapframe;
83
/*
 * Message structure for Inter Processor Communication in MP systems
 */
struct xpmsg {
	volatile int tag;		/* message type: one of the XPMSG* codes below */
#define	XPMSG15_PAUSECPU	1	/* pause this CPU (level-15 message) */
#define	XPMSG_FUNC		4	/* cross call: run u.xpmsg_func.func */
#define	XPMSG_FTRP		5	/* cross call: run u.xpmsg_func.trap (fast trap) */

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2).  `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	/*
	 * Handshake flags.  NOTE(review): presumably set by the receiving
	 * CPU to acknowledge receipt and completion of the message --
	 * confirm against the IPI code in sparc/sparc/cpu.c.
	 */
	volatile int	received;
	volatile int	complete;
};
111
/*
 * The cpuinfo structure.  This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures).  The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */

	/*
	 * Primary Inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself is one (normal 32 byte) cache-line.
	 */
	struct xpmsg	msg __aligned(32);

	/* Scheduler flags */
	int	ci_want_ast;		/* AST pending; set by cpu_signotify()/cpu_need_resched() */
	int	ci_want_resched;	/* reschedule requested; set by cpu_need_resched() */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int	ci_cpuid;		/* CPU index (see cpus[] array) */

	/* Context administration */
	int	*ctx_tbl;		/* [4m] SRMMU-edible context table */
	paddr_t	ctx_tbl_pa;		/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see cache.h */

	/* various flags to workaround anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send a IPI to (cpi).  For Ross cpus we need to read
	 * the pending register to avoid a hardware bug.
	 */
#define raise_ipi(cpi,lvl)	do {			\
	int x;						\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);	\
} while (0)

	int	sun4_mmu3l;		/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int	ci_idepth;		/* Interrupt depth */

	/*
	 * The following pointers point to processes that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct lwp	*ci_curlwp;	/* CPU owner */
	struct lwp	*fplwp;		/* FPU owner */

	/* NOTE(review): presumably mutex bookkeeping for the MI mutex code -- confirm */
	int	ci_mtx_count;
	int	ci_mtx_oldspl;

	/*
	 * Idle PCB and Interrupt stack;
	 */
	void	*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)	/* 128 128-byte stack frames */
	void	*redzone;		/* DEBUG: stack red zone */
#define REDSIZE	(8*96)			/* some room for bouncing */

	struct	pcb *curpcb;		/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush function come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void	*vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);

	int	cpu_type;		/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int	node;			/* PROM node for this CPU */
	int	mid;			/* Module ID for MP systems */
	int	mbus;			/* 1 if CPU is on MBus */
	int	mxcc;			/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int	cpu_impl;		/* CPU implementation code */
	int	cpu_vers;		/* CPU version code */
	int	mmu_impl;		/* MMU implementation code */
	int	mmu_vers;		/* MMU version code */
	int	master;			/* 1 if this is bootup CPU */

	vaddr_t	mailbox;		/* VA of CPU's mailbox */

	int	mmu_ncontext;		/* Number of contexts supported */
	int	mmu_nregion;		/* Number of regions supported */
	int	mmu_nsegment;		/* [4/4c] Segments */
	int	mmu_npmeg;		/* [4/4c] Pmegs */

	/* XXX - we currently don't actually use the following */
	int	arch;			/* Architecture: CPU_SUN4x */
	int	class;			/* Class: SuperSPARC, microSPARC... */
	int	classlvl;		/* Iteration in class: 1, 2, etc. */
	int	classsublvl;		/* stepping in class (version) */

	int	hz;			/* Clock speed */

	/* FPU information */
	int	fpupresent;		/* true if FPU is present */
	int	fpuvers;		/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char	fpu_namebuf[32];	/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;	/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);


#if 0
	/* hardware-assisted block operation routines */
	void	(*hwbcopy)(const void *from, void *to, size_t len);
	void	(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void	(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void	(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void	(*idlespin)(struct cpu_info *);
	/* Module Control Registers */
	/*bus_space_handle_t*/ long	ci_mbusport;
	/*bus_space_handle_t*/ long	ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/End VA's of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to uvm.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	/* Event counters */
	struct evcnt	ci_savefpstate;
	struct evcnt	ci_savefpstate_null;
	struct evcnt	ci_xpmsg_mutex_fail;
	struct evcnt	ci_xpmsg_mutex_fail_call;
	struct evcnt	ci_intrcnt[16];
	struct evcnt	ci_sintrcnt[16];
};
339
/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
/* This CPU's cpu_info, reached through the per-CPU CPUINFO_VA alias mapping */
#define	cpuinfo		(*(struct cpu_info *)CPUINFO_VA)
/* Global (cross-CPU visible) VA of this CPU's cpu_info; see ci_self above */
#define	curcpu()	(cpuinfo.ci_self)
#define	curlwp		(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()	(cpuinfo.ci_cpuid)
350
351 #endif /* _KERNEL || _KMEMUSER */
352
353 /* Kernel only things. */
354 #if defined(_KERNEL)
/* MD hook run at fork time; NOTE(review): presumably copies MD proc state -- confirm caller */
void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors(void);	/* start the non-boot CPUs */
#endif
360
/*
 * Arguments to hardclock, softclock and statclock encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;
375
/* End of the interrupt stack (uniprocessor case; MP uses cpuinfo.eintstack) */
extern int eintstack[];

/* True if the interrupted context was in user mode (PSR "previous supervisor" bit clear) */
#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
/* True if the pre-interrupt PIL was below level (n) */
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
/* True if %fp lies within the interrupt stack, i.e. we interrupted an interrupt */
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)	\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	 (framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif

void	sparc_softintr_init(void);
390
/*
 * Preempt the current process on the target CPU if in interrupt from
 * user mode, or after the current trap/syscall if in system mode.
 * Sets the target's resched/AST flags, then IPIs it so it notices.
 */
#define cpu_need_resched(ci, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_resched = 1;					\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if (((flags) & RESCHED_IMMED) || (ci)->ci_cpuid != cpu_number()) \
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)
404
/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)
411
/*
 * Notify the lwp (l) that it has a signal pending,
 * process as soon as possible.
 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)
423
/* CPU architecture version (exported via the CPU_ARCH sysctl above) */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp (from its MD trapframe) */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)
432
/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);	/* handler to invoke */
	void	*ih_arg;		/* its argument; NULL => pass clockframe */
	struct	intrhand *ih_next;	/* next handler on this chain */
	int	ih_classipl;		/* NOTE(review): class ipl -- confirm in intr.c */
	int	(*ih_realfun)(void *);	/* wrapped (real) handler; see above */
	void	*ih_realarg;		/* wrapped handler's argument */
} *intrhand[15];			/* NOTE(review): presumably one chain per IPL -- confirm */
450
void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);

void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void	dumpconf(void);
void *	reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void	zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int	fixalign(struct lwp *, struct trapframe *, void **);
int	emulinstr(int, struct trapframe *);

/* cpu.c */
void	mp_pause_cpus(void);
void	mp_resume_cpus(void);
void	mp_halt_cpus(void);
#ifdef DDB
void	mp_pause_cpus_ddb(void);
void	mp_resume_cpus_ddb(void);
#endif

/* intr.c */
u_int	setitr(u_int);
u_int	getitr(void);
531
/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */
551
552 #endif /* _KERNEL */
553 #endif /* _CPU_H_ */
554