/*	$NetBSD: cpu.h,v 1.96 2016/12/10 09:51:43 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */
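
/*
 * Illustrative sketch only (not part of this header's interface): the
 * CTL_MACHDEP nodes above are read with sysctl(3) from userland, e.g.
 * to fetch the name of the booted kernel.  The program below is an
 * assumed example; error handling is omitted.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
	char buf[MAXPATHLEN];
	size_t len = sizeof(buf);

	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
		printf("booted kernel: %s\n", buf);
	return 0;
}
#endif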

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_sparc_arch.h"
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

#include <machine/intr.h>
#include <machine/psl.h>

#if defined(_KERNEL)
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/intreg.h>
#else
#include <arch/sparc/sparc/vaddrs.h>
#include <arch/sparc/sparc/cache.h>
#endif

/*
 * Message structure for Inter Processor Communication in MP systems
 */
struct xpmsg {
	volatile int	tag;
#define	XPMSG15_PAUSECPU	1
#define	XPMSG_FUNC		4
#define	XPMSG_FTRP		5

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	volatile int	received;
	volatile int	complete;
};
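
/*
 * Illustrative sketch only (assumed helper, not part of this header):
 * how an initiating CPU would express a cross-call request using the
 * message structure above.  The real protocol, including how the target
 * CPU is interrupted and how the handler acknowledges the message,
 * lives in the MP cross-call code (cpu.c); the spin-wait below is a
 * placeholder for that.
 */
#if 0
static void
example_post_xcall(struct xpmsg *msg, void (*fn)(int, int, int),
    int a0, int a1, int a2)
{
	msg->u.xpmsg_func.func = fn;	/* C-callable function to run */
	msg->u.xpmsg_func.trap = NULL;	/* or a fast-trap handler instead */
	msg->u.xpmsg_func.arg0 = a0;
	msg->u.xpmsg_func.arg1 = a1;
	msg->u.xpmsg_func.arg2 = a2;
	msg->received = 0;
	msg->complete = 0;
	msg->tag = XPMSG_FUNC;		/* examined by the target CPU */

	/* ... interrupt the target CPU (see raise_ipi() below) ... */

	while (msg->complete == 0)
		;			/* wait for the target to finish */
}
#endif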

/*
 * The cpuinfo structure. This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures). The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */

	/*
	 * Primary Inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself is one (normal 32 byte) cache-line.
	 */
	struct xpmsg	msg __aligned(32);

	/* Scheduler flags */
	int	ci_want_ast;
	int	ci_want_resched;

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int		ci_cpuid;	/* CPU index (see cpus[] array) */

	/* Context administration */
	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see cache.h */

	/* various flags to work around anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send an IPI to (cpi).  For Ross CPUs we need to read
	 * the pending register to avoid a hardware bug.
	 */
#define raise_ipi(cpi,lvl)	do {			\
	int x;						\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);	\
} while (0)

	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int		ci_idepth;	/* Interrupt depth */

	/*
	 * The following pointers point to LWPs that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct lwp	*ci_curlwp;	/* CPU owner */
	struct lwp	*fplwp;		/* FPU owner */

	int		ci_mtx_count;
	int		ci_mtx_oldspl;

	/*
	 * Idle PCB and interrupt stack
	 */
	void		*eintstack;	/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)	/* 128 128-byte stack frames */
	void		*redzone;	/* DEBUG: stack red zone */
#define REDSIZE		(8*96)		/* some room for bouncing */

	struct pcb	*curpcb;	/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void	*vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);

	int	cpu_type;		/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int		node;		/* PROM node for this CPU */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is bootup CPU */

	vaddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion; 	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */

	/* XXX - we currently don't actually use the following */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */

	int		hz;		/* Clock speed */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;	/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);


#if 0
	/* hardware-assisted block operation routines */
	void	(*hwbcopy)(const void *from, void *to, size_t len);
	void	(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void	(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void	(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void	(*idlespin)(struct cpu_info *);
	/* Module Control Registers */
	/*bus_space_handle_t*/ long	ci_mbusport;
	/*bus_space_handle_t*/ long	ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/End VA's of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to uvm.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	struct evcnt ci_savefpstate;
	struct evcnt ci_savefpstate_null;
	struct evcnt ci_xpmsg_mutex_fail;
	struct evcnt ci_xpmsg_mutex_fail_call;
	struct evcnt ci_intrcnt[16];
	struct evcnt ci_sintrcnt[16];
};
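
/*
 * Illustrative sketch only: pmap-style code reaches the cache operations
 * above through the per-CPU `cpuinfo' alias (see cpuvar.h).  The argument
 * meanings (a virtual address and a context number) and the function name
 * below are assumptions for illustration.
 */
#if 0
static void
example_flush_page(int va, int ctx)
{
	/* Flush one page from the virtual cache on all CPU modules... */
	cpuinfo.vcache_flush_page(va, ctx);

	/* ...or on the executing CPU only (the `sp_' flavour). */
	cpuinfo.sp_vcache_flush_page(va, ctx);
}
#endif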

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	curcpu()		(cpuinfo.ci_self)
#define	curlwp			(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()		(cpuinfo.ci_cpuid)
void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors(void);
#endif
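
/*
 * Illustrative sketch only: because CPUINFO_VA always maps the executing
 * CPU's own cpu_info, the macros above are cheap.  The function name and
 * message below are examples, not part of this header.
 */
#if 0
static void
example_identify_self(void)
{
	struct cpu_info *ci = curcpu();	/* via the ci_self back-pointer */

	printf("cpu%d: %s%s\n", cpu_number(), ci->cpu_longname,
	    CPU_IS_PRIMARY(ci) ? " (boot CPU)" : "");
}
#endif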

/*
 * Arguments to hardclock, softclock and statclock encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;

extern int eintstack[];

#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)						\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	 (framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif
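
/*
 * Illustrative sketch only: how a clock interrupt consumer would inspect
 * the opaque frame using the macros above.  The counters and function
 * name are placeholders.
 */
#if 0
static u_int example_user_ticks, example_intr_ticks, example_kernel_ticks;

static void
example_clock_sample(struct clockframe *cf)
{
	if (CLKF_USERMODE(cf))
		example_user_ticks++;	/* user code; CLKF_PC() is a user PC */
	else if (CLKF_INTR(cf))
		example_intr_ticks++;	/* another interrupt handler (frame
					   on the interrupt stack) */
	else
		example_kernel_ticks++;	/* kernel code proper */
}
#endif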

void	sparc_softintr_init(void);

/*
 * Preempt the current process on the target CPU if in interrupt from
 * user mode, or after the current trap/syscall if in system mode.
 */
#define	cpu_need_resched(ci, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_resched = 1;					\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if (((flags) & RESCHED_IMMED) || (ci)->ci_cpuid != cpu_number())\
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)

/*
 * Notify the given LWP (l) that it has a signal pending, to be
 * processed as soon as possible.
 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/* CPU architecture version */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of an lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	struct	intrhand *ih_next;
	int	ih_classipl;
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
} *intrhand[15];

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);
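
/*
 * Illustrative sketch only: a driver allocates a struct intrhand, fills
 * in ih_fun/ih_arg and hands it to intr_establish().  The softc, handler
 * body, interrupt level (here 7) and the remaining arguments are all
 * placeholders; see intr.c and existing drivers for the real usage.
 */
#if 0
struct example_softc {
	struct intrhand	sc_ih;		/* handler slot for this device */
	/* ... device registers, etc ... */
};

static int
example_hardintr(void *arg)
{
	struct example_softc *sc = arg;

	/* ... service the device ... */
	return 1;			/* 1: "I took care of it" */
}

static void
example_attach_intr(struct example_softc *sc)
{
	sc->sc_ih.ih_fun = example_hardintr;
	sc->sc_ih.ih_arg = sc;
	intr_establish(7, 0, &sc->sc_ih, NULL, false);
}
#endif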

void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void	dumpconf(void);
void *	reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void 	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int	fixalign(struct lwp *, struct trapframe *, void **);
int	emulinstr(int, struct trapframe *);

/* cpu.c */
void	mp_pause_cpus(void);
void	mp_resume_cpus(void);
void	mp_halt_cpus(void);
#ifdef DDB
void	mp_pause_cpus_ddb(void);
void	mp_resume_cpus_ddb(void);
#endif

/* intr.c */
u_int	setitr(u_int);
u_int	getitr(void);


/*
 *
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */
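
/*
 * Illustrative sketch only: with the layout described above, the entry
 * for trap type `tt' is simply trapbase[tt], i.e. 16 bytes (four
 * instructions) past the previous entry.  The function below is an
 * assumed example of patching one vector; the code that really does
 * this lives in machdep.c/locore.s and must also flush the icache.
 */
#if 0
static void
example_install_vector(int tt, const struct trapvec *tv)
{
	int i;

	for (i = 0; i < 4; i++)
		trapbase[tt].tv_instr[i] = tv->tv_instr[i];
	/* ... flush the instruction cache for the patched vector ... */
}
#endif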

#endif /* _KERNEL */
#endif /* _CPU_H_ */