/*	$NetBSD: cpu.h,v 1.97 2016/12/10 10:41:07 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */
42 1.1 deraadt
43 1.1 deraadt #ifndef _CPU_H_
44 1.1 deraadt #define _CPU_H_
45 1.1 deraadt
/*
 * CTL_MACHDEP definitions: sysctl(7) node ids under the machdep tree.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */
54 1.1 deraadt
55 1.1 deraadt /*
56 1.1 deraadt * Exported definitions unique to SPARC cpu support.
57 1.1 deraadt */
58 1.1 deraadt
59 1.97 mrg /* Things needed by crash or the kernel */
60 1.97 mrg #if defined(_KERNEL) || defined(_KMEMUSER)
61 1.97 mrg
62 1.65 tsutsui #if defined(_KERNEL_OPT)
63 1.35 thorpej #include "opt_multiprocessor.h"
64 1.36 thorpej #include "opt_lockdebug.h"
65 1.47 darrenr #include "opt_sparc_arch.h"
66 1.35 thorpej #endif
67 1.35 thorpej
68 1.96 mrg #include <sys/cpu_data.h>
69 1.96 mrg #include <sys/evcnt.h>
70 1.96 mrg
71 1.52 pk #include <machine/intr.h>
72 1.1 deraadt #include <machine/psl.h>
73 1.96 mrg
74 1.96 mrg #if defined(_KERNEL)
75 1.36 thorpej #include <sparc/sparc/cpuvar.h>
76 1.1 deraadt #include <sparc/sparc/intreg.h>
77 1.96 mrg #else
78 1.96 mrg #include <arch/sparc/sparc/vaddrs.h>
79 1.96 mrg #include <arch/sparc/sparc/cache.h>
80 1.96 mrg #endif
81 1.96 mrg
82 1.97 mrg struct trapframe;
83 1.97 mrg
/*
 * Message structure for Inter Processor Communication in MP systems.
 * One of these lives in each cpu_info; `tag' selects the operation,
 * `received'/`complete' are handshake flags set by the target CPU.
 */
struct xpmsg {
	volatile int tag;
#define	XPMSG15_PAUSECPU	1
#define	XPMSG_FUNC		4
#define	XPMSG_FTRP		5

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	volatile int	received;
	volatile int	complete;
};
111 1.96 mrg
112 1.96 mrg /*
113 1.96 mrg * The cpuinfo structure. This structure maintains information about one
114 1.96 mrg * currently installed CPU (there may be several of these if the machine
115 1.96 mrg * supports multiple CPUs, as on some Sun4m architectures). The information
116 1.96 mrg * in this structure supersedes the old "cpumod", "mmumod", and similar
117 1.96 mrg * fields.
118 1.96 mrg */
119 1.96 mrg
120 1.96 mrg struct cpu_info {
121 1.96 mrg struct cpu_data ci_data; /* MI per-cpu data */
122 1.96 mrg
123 1.96 mrg /*
124 1.96 mrg * Primary Inter-processor message area. Keep this aligned
125 1.96 mrg * to a cache line boundary if possible, as the structure
126 1.96 mrg * itself is one (normal 32 byte) cache-line.
127 1.96 mrg */
128 1.96 mrg struct xpmsg msg __aligned(32);
129 1.96 mrg
130 1.96 mrg /* Scheduler flags */
131 1.96 mrg int ci_want_ast;
132 1.96 mrg int ci_want_resched;
133 1.96 mrg
134 1.96 mrg /*
135 1.96 mrg * SPARC cpu_info structures live at two VAs: one global
136 1.96 mrg * VA (so each CPU can access any other CPU's cpu_info)
137 1.96 mrg * and an alias VA CPUINFO_VA which is the same on each
138 1.96 mrg * CPU and maps to that CPU's cpu_info. Since the alias
139 1.96 mrg * CPUINFO_VA is how we locate our cpu_info, we have to
140 1.96 mrg * self-reference the global VA so that we can return it
141 1.96 mrg * in the curcpu() macro.
142 1.96 mrg */
143 1.96 mrg struct cpu_info * volatile ci_self;
144 1.96 mrg
145 1.96 mrg int ci_cpuid; /* CPU index (see cpus[] array) */
146 1.96 mrg
147 1.96 mrg /* Context administration */
148 1.96 mrg int *ctx_tbl; /* [4m] SRMMU-edible context table */
149 1.96 mrg paddr_t ctx_tbl_pa; /* [4m] ctx table physical address */
150 1.96 mrg
151 1.96 mrg /* Cache information */
152 1.96 mrg struct cacheinfo cacheinfo; /* see cache.h */
153 1.96 mrg
154 1.96 mrg /* various flags to workaround anomalies in chips */
155 1.96 mrg volatile int flags; /* see CPUFLG_xxx, below */
156 1.96 mrg
157 1.96 mrg /* Per processor counter register (sun4m only) */
158 1.96 mrg volatile struct counter_4m *counterreg_4m;
159 1.96 mrg
160 1.96 mrg /* Per processor interrupt mask register (sun4m only) */
161 1.96 mrg volatile struct icr_pi *intreg_4m;
162 1.96 mrg /*
163 1.96 mrg * Send a IPI to (cpi). For Ross cpus we need to read
164 1.96 mrg * the pending register to avoid a hardware bug.
165 1.96 mrg */
166 1.96 mrg #define raise_ipi(cpi,lvl) do { \
167 1.96 mrg int x; \
168 1.96 mrg (cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl); \
169 1.96 mrg x = (cpi)->intreg_4m->pi_pend; __USE(x); \
170 1.96 mrg } while (0)
171 1.96 mrg
172 1.96 mrg int sun4_mmu3l; /* [4]: 3-level MMU present */
173 1.96 mrg #if defined(SUN4_MMU3L)
174 1.96 mrg #define HASSUN4_MMU3L (cpuinfo.sun4_mmu3l)
175 1.96 mrg #else
176 1.96 mrg #define HASSUN4_MMU3L (0)
177 1.96 mrg #endif
178 1.96 mrg int ci_idepth; /* Interrupt depth */
179 1.96 mrg
180 1.96 mrg /*
181 1.96 mrg * The following pointers point to processes that are somehow
182 1.96 mrg * associated with this CPU--running on it, using its FPU,
183 1.96 mrg * etc.
184 1.96 mrg */
185 1.96 mrg struct lwp *ci_curlwp; /* CPU owner */
186 1.96 mrg struct lwp *fplwp; /* FPU owner */
187 1.96 mrg
188 1.96 mrg int ci_mtx_count;
189 1.96 mrg int ci_mtx_oldspl;
190 1.96 mrg
191 1.96 mrg /*
192 1.96 mrg * Idle PCB and Interrupt stack;
193 1.96 mrg */
194 1.96 mrg void *eintstack; /* End of interrupt stack */
195 1.96 mrg #define INT_STACK_SIZE (128 * 128) /* 128 128-byte stack frames */
196 1.96 mrg void *redzone; /* DEBUG: stack red zone */
197 1.96 mrg #define REDSIZE (8*96) /* some room for bouncing */
198 1.96 mrg
199 1.96 mrg struct pcb *curpcb; /* CPU's PCB & kernel stack */
200 1.96 mrg
201 1.96 mrg /* locore defined: */
202 1.96 mrg void (*get_syncflt)(void); /* Not C-callable */
203 1.96 mrg int (*get_asyncflt)(u_int *, u_int *);
204 1.96 mrg
205 1.96 mrg /* Synchronous Fault Status; temporary storage */
206 1.96 mrg struct {
207 1.96 mrg int sfsr;
208 1.96 mrg int sfva;
209 1.96 mrg } syncfltdump;
210 1.96 mrg
211 1.96 mrg /*
212 1.96 mrg * Cache handling functions.
213 1.96 mrg * Most cache flush function come in two flavours: one that
214 1.96 mrg * acts only on the CPU it executes on, and another that
215 1.96 mrg * uses inter-processor signals to flush the cache on
216 1.96 mrg * all processor modules.
217 1.96 mrg * The `ft_' versions are fast trap cache flush handlers.
218 1.96 mrg */
219 1.96 mrg void (*cache_flush)(void *, u_int);
220 1.96 mrg void (*vcache_flush_page)(int, int);
221 1.96 mrg void (*sp_vcache_flush_page)(int, int);
222 1.96 mrg void (*ft_vcache_flush_page)(int, int);
223 1.96 mrg void (*vcache_flush_segment)(int, int, int);
224 1.96 mrg void (*sp_vcache_flush_segment)(int, int, int);
225 1.96 mrg void (*ft_vcache_flush_segment)(int, int, int);
226 1.96 mrg void (*vcache_flush_region)(int, int);
227 1.96 mrg void (*sp_vcache_flush_region)(int, int);
228 1.96 mrg void (*ft_vcache_flush_region)(int, int);
229 1.96 mrg void (*vcache_flush_context)(int);
230 1.96 mrg void (*sp_vcache_flush_context)(int);
231 1.96 mrg void (*ft_vcache_flush_context)(int);
232 1.96 mrg
233 1.96 mrg /* The are helpers for (*cache_flush)() */
234 1.96 mrg void (*sp_vcache_flush_range)(int, int, int);
235 1.96 mrg void (*ft_vcache_flush_range)(int, int, int);
236 1.96 mrg
237 1.96 mrg void (*pcache_flush_page)(paddr_t, int);
238 1.96 mrg void (*pure_vcache_flush)(void);
239 1.96 mrg void (*cache_flush_all)(void);
240 1.96 mrg
241 1.96 mrg /* Support for hardware-assisted page clear/copy */
242 1.96 mrg void (*zero_page)(paddr_t);
243 1.96 mrg void (*copy_page)(paddr_t, paddr_t);
244 1.96 mrg
245 1.96 mrg /* Virtual addresses for use in pmap copy_page/zero_page */
246 1.96 mrg void * vpage[2];
247 1.96 mrg int *vpage_pte[2]; /* pte location of vpage[] */
248 1.96 mrg
249 1.96 mrg void (*cache_enable)(void);
250 1.96 mrg
251 1.96 mrg int cpu_type; /* Type: see CPUTYP_xxx below */
252 1.96 mrg
253 1.96 mrg /* Inter-processor message area (high priority but used infrequently) */
254 1.96 mrg struct xpmsg msg_lev15;
255 1.96 mrg
256 1.96 mrg /* CPU information */
257 1.96 mrg int node; /* PROM node for this CPU */
258 1.96 mrg int mid; /* Module ID for MP systems */
259 1.96 mrg int mbus; /* 1 if CPU is on MBus */
260 1.96 mrg int mxcc; /* 1 if a MBus-level MXCC is present */
261 1.96 mrg const char *cpu_longname; /* CPU model */
262 1.96 mrg int cpu_impl; /* CPU implementation code */
263 1.96 mrg int cpu_vers; /* CPU version code */
264 1.96 mrg int mmu_impl; /* MMU implementation code */
265 1.96 mrg int mmu_vers; /* MMU version code */
266 1.96 mrg int master; /* 1 if this is bootup CPU */
267 1.96 mrg
268 1.96 mrg vaddr_t mailbox; /* VA of CPU's mailbox */
269 1.96 mrg
270 1.96 mrg int mmu_ncontext; /* Number of contexts supported */
271 1.96 mrg int mmu_nregion; /* Number of regions supported */
272 1.96 mrg int mmu_nsegment; /* [4/4c] Segments */
273 1.96 mrg int mmu_npmeg; /* [4/4c] Pmegs */
274 1.96 mrg
275 1.96 mrg /* XXX - we currently don't actually use the following */
276 1.96 mrg int arch; /* Architecture: CPU_SUN4x */
277 1.96 mrg int class; /* Class: SuperSPARC, microSPARC... */
278 1.96 mrg int classlvl; /* Iteration in class: 1, 2, etc. */
279 1.96 mrg int classsublvl; /* stepping in class (version) */
280 1.96 mrg
281 1.96 mrg int hz; /* Clock speed */
282 1.96 mrg
283 1.96 mrg /* FPU information */
284 1.96 mrg int fpupresent; /* true if FPU is present */
285 1.96 mrg int fpuvers; /* FPU revision */
286 1.96 mrg const char *fpu_name; /* FPU model */
287 1.96 mrg char fpu_namebuf[32];/* Buffer for FPU name, if necessary */
288 1.96 mrg
289 1.96 mrg /* XXX */
290 1.96 mrg volatile void *ci_ddb_regs; /* DDB regs */
291 1.96 mrg
292 1.96 mrg /*
293 1.96 mrg * The following are function pointers to do interesting CPU-dependent
294 1.96 mrg * things without having to do type-tests all the time
295 1.96 mrg */
296 1.96 mrg
297 1.96 mrg /* bootup things: access to physical memory */
298 1.96 mrg u_int (*read_physmem)(u_int addr, int space);
299 1.96 mrg void (*write_physmem)(u_int addr, u_int data);
300 1.96 mrg void (*cache_tablewalks)(void);
301 1.96 mrg void (*mmu_enable)(void);
302 1.96 mrg void (*hotfix)(struct cpu_info *);
303 1.96 mrg
304 1.96 mrg
305 1.96 mrg #if 0
306 1.96 mrg /* hardware-assisted block operation routines */
307 1.96 mrg void (*hwbcopy)(const void *from, void *to, size_t len);
308 1.96 mrg void (*hwbzero)(void *buf, size_t len);
309 1.96 mrg
310 1.96 mrg /* routine to clear mbus-sbus buffers */
311 1.96 mrg void (*mbusflush)(void);
312 1.96 mrg #endif
313 1.96 mrg
314 1.96 mrg /*
315 1.96 mrg * Memory error handler; parity errors, unhandled NMIs and other
316 1.96 mrg * unrecoverable faults end up here.
317 1.96 mrg */
318 1.96 mrg void (*memerr)(unsigned, u_int, u_int, struct trapframe *);
319 1.96 mrg void (*idlespin)(struct cpu_info *);
320 1.96 mrg /* Module Control Registers */
321 1.96 mrg /*bus_space_handle_t*/ long ci_mbusport;
322 1.96 mrg /*bus_space_handle_t*/ long ci_mxccregs;
323 1.96 mrg
324 1.96 mrg u_int ci_tt; /* Last trap (if tracing) */
325 1.96 mrg
326 1.96 mrg /*
327 1.96 mrg * Start/End VA's of this cpu_info region; we upload the other pages
328 1.96 mrg * in this region that aren't part of the cpu_info to uvm.
329 1.96 mrg */
330 1.96 mrg vaddr_t ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;
331 1.96 mrg
332 1.96 mrg struct evcnt ci_savefpstate;
333 1.96 mrg struct evcnt ci_savefpstate_null;
334 1.96 mrg struct evcnt ci_xpmsg_mutex_fail;
335 1.96 mrg struct evcnt ci_xpmsg_mutex_fail_call;
336 1.96 mrg struct evcnt ci_intrcnt[16];
337 1.96 mrg struct evcnt ci_sintrcnt[16];
338 1.96 mrg };
339 1.1 deraadt
/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpuinfo		(*(struct cpu_info *)CPUINFO_VA)
#define	curcpu()	(cpuinfo.ci_self)
#define	curlwp		(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()	(cpuinfo.ci_cpuid)
350 1.97 mrg
351 1.97 mrg #endif /* _KERNEL || _KMEMUSER */
352 1.97 mrg
353 1.97 mrg /* Kernel only things. */
354 1.97 mrg #if defined(_KERNEL)
355 1.76 uwe void cpu_proc_fork(struct proc *, struct proc *);
356 1.35 thorpej
357 1.35 thorpej #if defined(MULTIPROCESSOR)
358 1.74 uwe void cpu_boot_secondary_processors(void);
359 1.35 thorpej #endif
360 1.1 deraadt
361 1.1 deraadt /*
362 1.60 pk * Arguments to hardclock, softclock and statclock encapsulate the
363 1.1 deraadt * previous machine state in an opaque clockframe. The ipl is here
364 1.1 deraadt * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
365 1.1 deraadt * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
366 1.1 deraadt */
367 1.1 deraadt struct clockframe {
368 1.1 deraadt u_int psr; /* psr before interrupt, excluding PSR_ET */
369 1.1 deraadt u_int pc; /* pc at interrupt */
370 1.1 deraadt u_int npc; /* npc at interrupt */
371 1.1 deraadt u_int ipl; /* actual interrupt priority level */
372 1.1 deraadt u_int fp; /* %fp at interrupt */
373 1.1 deraadt };
374 1.3 deraadt typedef struct clockframe clockframe;
375 1.1 deraadt
376 1.1 deraadt extern int eintstack[];
377 1.1 deraadt
378 1.1 deraadt #define CLKF_USERMODE(framep) (((framep)->psr & PSR_PS) == 0)
379 1.60 pk #define CLKF_LOPRI(framep,n) (((framep)->psr & PSR_PIL) < (n) << 8)
380 1.1 deraadt #define CLKF_PC(framep) ((framep)->pc)
381 1.30 pk #if defined(MULTIPROCESSOR)
382 1.30 pk #define CLKF_INTR(framep) \
383 1.30 pk ((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE && \
384 1.30 pk (framep)->fp < (u_int)cpuinfo.eintstack)
385 1.30 pk #else
386 1.1 deraadt #define CLKF_INTR(framep) ((framep)->fp < (u_int)eintstack)
387 1.30 pk #endif
388 1.21 pk
389 1.83 ad void sparc_softintr_init(void);
390 1.1 deraadt
/*
 * Preempt the current process on the target CPU if in interrupt from
 * user mode, or after the current trap/syscall if in system mode.
 */
#define	cpu_need_resched(ci, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_resched = 1;					\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if (((flags) & RESCHED_IMMED) || (ci)->ci_cpuid != cpu_number()) \
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid. On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/* CPU architecture version */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)
432 1.91 martin
/*
 * Interrupt handler chains. Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it''). intr_establish() inserts a
 * handler into the list. The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	struct	intrhand *ih_next;
	int	ih_classipl;
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
} *intrhand[15];

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);

void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);
456 1.19 christos
457 1.19 christos /* disksubr.c */
458 1.19 christos struct dkbad;
459 1.74 uwe int isbad(struct dkbad *, int, int, int);
460 1.74 uwe
461 1.19 christos /* machdep.c */
462 1.80 christos int ldcontrolb(void *);
463 1.54 pk void dumpconf(void);
464 1.80 christos void * reserve_dumppages(void *);
465 1.74 uwe void wcopy(const void *, void *, u_int);
466 1.74 uwe void wzero(void *, u_int);
467 1.74 uwe
468 1.19 christos /* clock.c */
469 1.19 christos struct timeval;
470 1.54 pk void lo_microtime(struct timeval *);
471 1.56 pk void schedintr(void *);
472 1.74 uwe
473 1.19 christos /* locore.s */
474 1.19 christos struct fpstate;
475 1.90 mrg void ipi_savefpstate(struct fpstate *);
476 1.54 pk void savefpstate(struct fpstate *);
477 1.54 pk void loadfpstate(struct fpstate *);
478 1.80 christos int probeget(void *, int);
479 1.54 pk void write_all_windows(void);
480 1.54 pk void write_user_windows(void);
481 1.81 yamt void lwp_trampoline(void);
482 1.19 christos struct pcb;
483 1.54 pk void snapshot(struct pcb *);
484 1.54 pk struct frame *getfp(void);
485 1.80 christos int xldcontrolb(void *, struct pcb *);
486 1.54 pk void copywords(const void *, void *, size_t);
487 1.54 pk void qcopy(const void *, void *, size_t);
488 1.54 pk void qzero(void *, size_t);
489 1.74 uwe
490 1.19 christos /* trap.c */
491 1.92 martin void cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
492 1.62 thorpej int rwindow_save(struct lwp *);
493 1.74 uwe
494 1.19 christos /* cons.c */
495 1.54 pk int cnrom(void);
496 1.74 uwe
497 1.19 christos /* zs.c */
498 1.54 pk void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
499 1.19 christos #ifdef KGDB
500 1.54 pk void zs_kgdb_init(void);
501 1.19 christos #endif
502 1.74 uwe
503 1.19 christos /* fb.c */
504 1.54 pk void fb_unblank(void);
505 1.74 uwe
506 1.19 christos /* kgdb_stub.c */
507 1.19 christos #ifdef KGDB
508 1.54 pk void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
509 1.54 pk void kgdb_connect(int);
510 1.54 pk void kgdb_panic(void);
511 1.19 christos #endif
512 1.74 uwe
513 1.24 pk /* emul.c */
514 1.24 pk struct trapframe;
515 1.94 martin int fixalign(struct lwp *, struct trapframe *, void **);
516 1.54 pk int emulinstr(int, struct trapframe *);
517 1.74 uwe
518 1.28 pk /* cpu.c */
519 1.54 pk void mp_pause_cpus(void);
520 1.54 pk void mp_resume_cpus(void);
521 1.54 pk void mp_halt_cpus(void);
522 1.61 pk #ifdef DDB
523 1.61 pk void mp_pause_cpus_ddb(void);
524 1.61 pk void mp_resume_cpus_ddb(void);
525 1.61 pk #endif
526 1.74 uwe
527 1.63 pk /* intr.c */
528 1.63 pk u_int setitr(u_int);
529 1.63 pk u_int getitr(void);
530 1.4 deraadt
531 1.74 uwe
/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table. The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];	/* the four instructions */
};
549 1.74 uwe
550 1.21 pk extern struct trapvec *trapbase; /* the 256 vectors */
551 1.11 deraadt
552 1.12 jtc #endif /* _KERNEL */
553 1.1 deraadt #endif /* _CPU_H_ */
554