cpu.h revision 1.110 1 1.110 ryo /* $NetBSD: cpu.h,v 1.110 2021/08/14 17:51:19 ryo Exp $ */
2 1.10 deraadt
3 1.1 deraadt /*
4 1.1 deraadt * Copyright (c) 1992, 1993
5 1.1 deraadt * The Regents of the University of California. All rights reserved.
6 1.1 deraadt *
7 1.1 deraadt * This software was developed by the Computer Systems Engineering group
8 1.1 deraadt * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 1.1 deraadt * contributed to Berkeley.
10 1.1 deraadt *
11 1.1 deraadt * All advertising materials mentioning features or use of this software
12 1.1 deraadt * must display the following acknowledgement:
13 1.1 deraadt * This product includes software developed by the University of
14 1.1 deraadt * California, Lawrence Berkeley Laboratory.
15 1.1 deraadt *
16 1.1 deraadt * Redistribution and use in source and binary forms, with or without
17 1.1 deraadt * modification, are permitted provided that the following conditions
18 1.1 deraadt * are met:
19 1.1 deraadt * 1. Redistributions of source code must retain the above copyright
20 1.1 deraadt * notice, this list of conditions and the following disclaimer.
21 1.1 deraadt * 2. Redistributions in binary form must reproduce the above copyright
22 1.1 deraadt * notice, this list of conditions and the following disclaimer in the
23 1.1 deraadt * documentation and/or other materials provided with the distribution.
24 1.64 agc * 3. Neither the name of the University nor the names of its contributors
25 1.1 deraadt * may be used to endorse or promote products derived from this software
26 1.1 deraadt * without specific prior written permission.
27 1.1 deraadt *
28 1.1 deraadt * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 1.1 deraadt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 1.1 deraadt * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 1.1 deraadt * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 1.1 deraadt * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 1.1 deraadt * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 1.1 deraadt * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 1.1 deraadt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 1.1 deraadt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 1.1 deraadt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 1.1 deraadt * SUCH DAMAGE.
39 1.1 deraadt *
40 1.7 deraadt * @(#)cpu.h 8.4 (Berkeley) 1/5/94
41 1.1 deraadt */
42 1.1 deraadt
43 1.1 deraadt #ifndef _CPU_H_
44 1.1 deraadt #define _CPU_H_
45 1.1 deraadt
46 1.1 deraadt /*
47 1.21 pk * CTL_MACHDEP definitions.
48 1.1 deraadt */
49 1.34 pk #define CPU_BOOTED_KERNEL 1 /* string: booted kernel name */
50 1.49 darrenr #define CPU_BOOTED_DEVICE 2 /* string: device booted from */
51 1.49 darrenr #define CPU_BOOT_ARGS 3 /* string: args booted with */
52 1.50 pk #define CPU_ARCH 4 /* integer: cpu architecture version */
53 1.1 deraadt
54 1.1 deraadt /*
55 1.1 deraadt * Exported definitions unique to SPARC cpu support.
56 1.1 deraadt */
57 1.1 deraadt
58 1.100 mrg /*
59 1.100 mrg * Sun-4 and Sun-4c virtual address cache.
60 1.100 mrg *
61 1.100 mrg * Sun-4 virtual caches come in two flavors, write-through (Sun-4c)
62 1.100 mrg * and write-back (Sun-4). The write-back caches are much faster
63 1.100 mrg * but require a bit more care.
64 1.100 mrg *
65 1.100 mrg * This is exported via sysctl so be careful changing it.
66 1.100 mrg */
/* VAC_UNKNOWN = write policy not yet determined; the others name the VAC's write policy. */
67 1.100 mrg enum vactype { VAC_UNKNOWN, VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };
68 1.100 mrg
69 1.100 mrg /*
70 1.100 mrg * Cache control information.
71 1.100 mrg *
72 1.100 mrg * This is exported via sysctl so be careful changing it.
73 1.100 mrg */
74 1.100 mrg
75 1.100 mrg struct cacheinfo {
76 1.100 mrg int c_totalsize; /* total size, in bytes */
77 1.100 mrg /* if split, MAX(icache,dcache) */
78 1.100 mrg int c_enabled; /* true => cache is enabled */
79 1.100 mrg int c_hwflush; /* true => have hardware flush */
80 1.100 mrg int c_linesize; /* line size, in bytes */
81 1.100 mrg /* if split, MIN(icache,dcache) */
82 1.100 mrg int c_l2linesize; /* log2(linesize) */
83 1.100 mrg int c_nlines; /* precomputed # of lines to flush */
84 1.100 mrg int c_physical; /* true => cache has physical
85 1.100 mrg address tags */
86 1.100 mrg int c_associativity; /* # of "buckets" in cache line */
87 1.100 mrg int c_split; /* true => cache is split */
88 1.100 mrg
/* The ic_*, dc_* and ec_* groups below mirror the unified c_* fields above. */
89 1.100 mrg int ic_totalsize; /* instruction cache */
90 1.100 mrg int ic_enabled;
91 1.100 mrg int ic_linesize;
92 1.100 mrg int ic_l2linesize;
93 1.100 mrg int ic_nlines;
94 1.100 mrg int ic_associativity;
95 1.100 mrg
96 1.100 mrg int dc_totalsize; /* data cache */
97 1.100 mrg int dc_enabled;
98 1.100 mrg int dc_linesize;
99 1.100 mrg int dc_l2linesize;
100 1.100 mrg int dc_nlines;
101 1.100 mrg int dc_associativity;
102 1.100 mrg
103 1.100 mrg int ec_totalsize; /* external cache info */
104 1.100 mrg int ec_enabled;
105 1.100 mrg int ec_linesize;
106 1.100 mrg int ec_l2linesize;
107 1.100 mrg int ec_nlines;
108 1.100 mrg int ec_associativity;
109 1.100 mrg
110 1.100 mrg enum vactype c_vactype; /* write policy of the virtual cache (see enum vactype above) */
111 1.100 mrg
112 1.100 mrg int c_flags; /* CACHE_* flags below */
113 1.100 mrg #define CACHE_PAGETABLES 0x1 /* caching pagetables OK on (sun4m) */
114 1.100 mrg #define CACHE_TRAPPAGEBUG 0x2 /* trap page can't be cached (sun4) */
115 1.100 mrg #define CACHE_MANDATORY 0x4 /* if cache is on, don't use
116 1.100 mrg uncached access */
117 1.100 mrg };
118 1.100 mrg
119 1.97 mrg /* Things needed by crash or the kernel */
120 1.97 mrg #if defined(_KERNEL) || defined(_KMEMUSER)
121 1.97 mrg
122 1.65 tsutsui #if defined(_KERNEL_OPT)
123 1.110 ryo #include "opt_gprof.h"
124 1.35 thorpej #include "opt_multiprocessor.h"
125 1.36 thorpej #include "opt_lockdebug.h"
126 1.47 darrenr #include "opt_sparc_arch.h"
127 1.35 thorpej #endif
128 1.35 thorpej
129 1.96 mrg #include <sys/cpu_data.h>
130 1.96 mrg #include <sys/evcnt.h>
131 1.96 mrg
132 1.52 pk #include <machine/intr.h>
133 1.1 deraadt #include <machine/psl.h>
134 1.96 mrg
135 1.96 mrg #if defined(_KERNEL)
136 1.36 thorpej #include <sparc/sparc/cpuvar.h>
137 1.1 deraadt #include <sparc/sparc/intreg.h>
138 1.96 mrg #endif
139 1.96 mrg
140 1.97 mrg struct trapframe;
141 1.97 mrg
142 1.96 mrg /*
143 1.96 mrg * Message structure for Inter Processor Communication in MP systems
144 1.96 mrg */
145 1.96 mrg struct xpmsg {
146 1.96 mrg volatile int tag; /* message type: one of the XPMSG* codes below */
147 1.96 mrg #define XPMSG15_PAUSECPU 1
148 1.96 mrg #define XPMSG_FUNC 4
149 1.96 mrg #define XPMSG_FTRP 5
150 1.96 mrg
151 1.96 mrg volatile union {
152 1.96 mrg /*
153 1.96 mrg * Cross call: ask to run (*func)(arg0,arg1,arg2)
154 1.96 mrg * or (*trap)(arg0,arg1,arg2). `trap' should be the
155 1.96 mrg * address of a `fast trap' handler that executes in
156 1.96 mrg * the trap window (see locore.s).
157 1.96 mrg */
158 1.96 mrg struct xpmsg_func {
159 1.96 mrg void (*func)(int, int, int);
160 1.96 mrg void (*trap)(int, int, int);
161 1.96 mrg int arg0;
162 1.96 mrg int arg1;
163 1.96 mrg int arg2;
164 1.96 mrg } xpmsg_func;
165 1.96 mrg } u;
/* NOTE(review): handshake flags — presumably set by the receiving CPU; verify against cpu.c */
166 1.96 mrg volatile int received;
167 1.96 mrg volatile int complete;
168 1.96 mrg };
169 1.96 mrg
170 1.96 mrg /*
171 1.96 mrg * The cpuinfo structure. This structure maintains information about one
172 1.96 mrg * currently installed CPU (there may be several of these if the machine
173 1.96 mrg * supports multiple CPUs, as on some Sun4m architectures). The information
174 1.96 mrg * in this structure supersedes the old "cpumod", "mmumod", and similar
175 1.96 mrg * fields.
176 1.96 mrg */
177 1.96 mrg
178 1.96 mrg struct cpu_info {
179 1.96 mrg /*
180 1.96 mrg * Primary Inter-processor message area. Keep this aligned
181 1.96 mrg * to a cache line boundary if possible, as the structure
182 1.100 mrg * itself is one or less (32/64 byte) cache-line.
183 1.96 mrg */
184 1.100 mrg struct xpmsg msg __aligned(64);
185 1.96 mrg
186 1.96 mrg /* Scheduler flags */
187 1.96 mrg int ci_want_ast; /* AST requested; set by cpu_need_resched()/cpu_signotify() below */
188 1.96 mrg int ci_want_resched;
189 1.96 mrg
190 1.96 mrg /*
191 1.96 mrg * SPARC cpu_info structures live at two VAs: one global
192 1.96 mrg * VA (so each CPU can access any other CPU's cpu_info)
193 1.96 mrg * and an alias VA CPUINFO_VA which is the same on each
194 1.96 mrg * CPU and maps to that CPU's cpu_info. Since the alias
195 1.96 mrg * CPUINFO_VA is how we locate our cpu_info, we have to
196 1.96 mrg * self-reference the global VA so that we can return it
197 1.96 mrg * in the curcpu() macro.
198 1.96 mrg */
199 1.96 mrg struct cpu_info * volatile ci_self;
200 1.96 mrg
201 1.96 mrg int ci_cpuid; /* CPU index (see cpus[] array) */
202 1.96 mrg
203 1.96 mrg /* Context administration */
204 1.96 mrg int *ctx_tbl; /* [4m] SRMMU-edible context table */
205 1.96 mrg paddr_t ctx_tbl_pa; /* [4m] ctx table physical address */
206 1.96 mrg
207 1.96 mrg /* Cache information */
208 1.100 mrg struct cacheinfo cacheinfo; /* see above */
209 1.96 mrg
210 1.96 mrg /* various flags to workaround anomalies in chips */
211 1.96 mrg volatile int flags; /* see CPUFLG_xxx, below */
212 1.96 mrg
213 1.96 mrg /* Per processor counter register (sun4m only) */
214 1.96 mrg volatile struct counter_4m *counterreg_4m;
215 1.96 mrg
216 1.96 mrg /* Per processor interrupt mask register (sun4m only) */
217 1.96 mrg volatile struct icr_pi *intreg_4m;
218 1.96 mrg /*
219 1.96 mrg * Send an IPI to (cpi). For Ross cpus we need to read
220 1.96 mrg * the pending register to avoid a hardware bug.
221 1.96 mrg */
222 1.96 mrg #define raise_ipi(cpi,lvl) do { \
223 1.99 macallan volatile int x; \
224 1.96 mrg (cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl); \
225 1.96 mrg x = (cpi)->intreg_4m->pi_pend; __USE(x); \
226 1.96 mrg } while (0)
227 1.96 mrg
228 1.96 mrg int sun4_mmu3l; /* [4]: 3-level MMU present */
229 1.96 mrg #if defined(SUN4_MMU3L)
230 1.96 mrg #define HASSUN4_MMU3L (cpuinfo.sun4_mmu3l)
231 1.96 mrg #else
232 1.96 mrg #define HASSUN4_MMU3L (0)
233 1.96 mrg #endif
234 1.96 mrg int ci_idepth; /* Interrupt depth */
235 1.96 mrg
236 1.96 mrg /*
237 1.96 mrg * The following pointers point to processes that are somehow
238 1.96 mrg * associated with this CPU--running on it, using its FPU,
239 1.96 mrg * etc.
240 1.96 mrg */
241 1.96 mrg struct lwp *ci_curlwp; /* CPU owner */
242 1.103 ad struct lwp *ci_onproc; /* current user LWP / kthread */
243 1.96 mrg struct lwp *fplwp; /* FPU owner */
244 1.96 mrg
/* Spin-mutex bookkeeping for the MI mutex code — see mutex(9). */
245 1.96 mrg int ci_mtx_count;
246 1.96 mrg int ci_mtx_oldspl;
247 1.96 mrg
248 1.96 mrg /*
249 1.96 mrg * Idle PCB and Interrupt stack;
250 1.96 mrg */
251 1.96 mrg void *eintstack; /* End of interrupt stack */
252 1.96 mrg #define INT_STACK_SIZE (128 * 128) /* 128 128-byte stack frames */
253 1.96 mrg void *redzone; /* DEBUG: stack red zone */
254 1.96 mrg #define REDSIZE (8*96) /* some room for bouncing */
255 1.96 mrg
256 1.96 mrg struct pcb *curpcb; /* CPU's PCB & kernel stack */
257 1.96 mrg
258 1.96 mrg /* locore defined: */
259 1.96 mrg void (*get_syncflt)(void); /* Not C-callable */
260 1.96 mrg int (*get_asyncflt)(u_int *, u_int *);
261 1.96 mrg
262 1.96 mrg /* Synchronous Fault Status; temporary storage */
263 1.96 mrg struct {
264 1.96 mrg int sfsr;
265 1.96 mrg int sfva;
266 1.96 mrg } syncfltdump;
267 1.96 mrg
268 1.96 mrg /*
269 1.96 mrg * Cache handling functions.
270 1.96 mrg * Most cache flush function come in two flavours: one that
271 1.96 mrg * acts only on the CPU it executes on, and another that
272 1.96 mrg * uses inter-processor signals to flush the cache on
273 1.96 mrg * all processor modules.
274 1.96 mrg * The `ft_' versions are fast trap cache flush handlers.
275 1.96 mrg */
276 1.96 mrg void (*cache_flush)(void *, u_int);
277 1.96 mrg void (*vcache_flush_page)(int, int);
278 1.96 mrg void (*sp_vcache_flush_page)(int, int);
279 1.96 mrg void (*ft_vcache_flush_page)(int, int);
280 1.96 mrg void (*vcache_flush_segment)(int, int, int);
281 1.96 mrg void (*sp_vcache_flush_segment)(int, int, int);
282 1.96 mrg void (*ft_vcache_flush_segment)(int, int, int);
283 1.96 mrg void (*vcache_flush_region)(int, int);
284 1.96 mrg void (*sp_vcache_flush_region)(int, int);
285 1.96 mrg void (*ft_vcache_flush_region)(int, int);
286 1.96 mrg void (*vcache_flush_context)(int);
287 1.96 mrg void (*sp_vcache_flush_context)(int);
288 1.96 mrg void (*ft_vcache_flush_context)(int);
289 1.96 mrg
290 1.96 mrg /* These are helpers for (*cache_flush)() */
291 1.96 mrg void (*sp_vcache_flush_range)(int, int, int);
292 1.96 mrg void (*ft_vcache_flush_range)(int, int, int);
293 1.96 mrg
294 1.96 mrg void (*pcache_flush_page)(paddr_t, int);
295 1.96 mrg void (*pure_vcache_flush)(void);
296 1.96 mrg void (*cache_flush_all)(void);
297 1.96 mrg
298 1.96 mrg /* Support for hardware-assisted page clear/copy */
299 1.96 mrg void (*zero_page)(paddr_t);
300 1.96 mrg void (*copy_page)(paddr_t, paddr_t);
301 1.96 mrg
302 1.96 mrg /* Virtual addresses for use in pmap copy_page/zero_page */
303 1.96 mrg void * vpage[2];
304 1.96 mrg int *vpage_pte[2]; /* pte location of vpage[] */
305 1.96 mrg
306 1.96 mrg void (*cache_enable)(void); /* model-specific cache enable hook */
307 1.96 mrg
308 1.96 mrg int cpu_type; /* Type: see CPUTYP_xxx below */
309 1.96 mrg
310 1.96 mrg /* Inter-processor message area (high priority but used infrequently) */
311 1.96 mrg struct xpmsg msg_lev15;
312 1.96 mrg
313 1.96 mrg /* CPU information */
314 1.96 mrg int node; /* PROM node for this CPU */
315 1.96 mrg int mid; /* Module ID for MP systems */
316 1.96 mrg int mbus; /* 1 if CPU is on MBus */
317 1.96 mrg int mxcc; /* 1 if a MBus-level MXCC is present */
318 1.96 mrg const char *cpu_longname; /* CPU model */
319 1.96 mrg int cpu_impl; /* CPU implementation code */
320 1.96 mrg int cpu_vers; /* CPU version code */
321 1.96 mrg int mmu_impl; /* MMU implementation code */
322 1.96 mrg int mmu_vers; /* MMU version code */
323 1.96 mrg int master; /* 1 if this is bootup CPU */
324 1.96 mrg
325 1.96 mrg vaddr_t mailbox; /* VA of CPU's mailbox */
326 1.96 mrg
327 1.96 mrg int mmu_ncontext; /* Number of contexts supported */
328 1.96 mrg int mmu_nregion; /* Number of regions supported */
329 1.96 mrg int mmu_nsegment; /* [4/4c] Segments */
330 1.96 mrg int mmu_npmeg; /* [4/4c] Pmegs */
331 1.96 mrg
332 1.96 mrg /* XXX - we currently don't actually use the following */
333 1.96 mrg int arch; /* Architecture: CPU_SUN4x */
334 1.96 mrg int class; /* Class: SuperSPARC, microSPARC... */
335 1.96 mrg int classlvl; /* Iteration in class: 1, 2, etc. */
336 1.96 mrg int classsublvl; /* stepping in class (version) */
337 1.96 mrg
338 1.96 mrg int hz; /* Clock speed */
339 1.96 mrg
340 1.96 mrg /* FPU information */
341 1.96 mrg int fpupresent; /* true if FPU is present */
342 1.96 mrg int fpuvers; /* FPU revision */
343 1.96 mrg const char *fpu_name; /* FPU model */
344 1.96 mrg char fpu_namebuf[32];/* Buffer for FPU name, if necessary */
345 1.96 mrg
346 1.96 mrg /* XXX */
347 1.96 mrg volatile void *ci_ddb_regs; /* DDB regs */
348 1.96 mrg
349 1.96 mrg /*
350 1.96 mrg * The following are function pointers to do interesting CPU-dependent
351 1.96 mrg * things without having to do type-tests all the time
352 1.96 mrg */
353 1.96 mrg
354 1.96 mrg /* bootup things: access to physical memory */
355 1.96 mrg u_int (*read_physmem)(u_int addr, int space);
356 1.96 mrg void (*write_physmem)(u_int addr, u_int data);
357 1.96 mrg void (*cache_tablewalks)(void);
358 1.96 mrg void (*mmu_enable)(void);
359 1.96 mrg void (*hotfix)(struct cpu_info *);
360 1.96 mrg
361 1.96 mrg
362 1.96 mrg #if 0
363 1.96 mrg /* hardware-assisted block operation routines */
364 1.96 mrg void (*hwbcopy)(const void *from, void *to, size_t len);
365 1.96 mrg void (*hwbzero)(void *buf, size_t len);
366 1.96 mrg
367 1.96 mrg /* routine to clear mbus-sbus buffers */
368 1.96 mrg void (*mbusflush)(void);
369 1.96 mrg #endif
370 1.96 mrg
371 1.96 mrg /*
372 1.96 mrg * Memory error handler; parity errors, unhandled NMIs and other
373 1.96 mrg * unrecoverable faults end up here.
374 1.96 mrg */
375 1.96 mrg void (*memerr)(unsigned, u_int, u_int, struct trapframe *);
376 1.104 ad void (*idlespin)(void);
377 1.96 mrg /* Module Control Registers */
378 1.96 mrg /*bus_space_handle_t*/ long ci_mbusport;
379 1.96 mrg /*bus_space_handle_t*/ long ci_mxccregs;
380 1.96 mrg
381 1.96 mrg u_int ci_tt; /* Last trap (if tracing) */
382 1.96 mrg
383 1.96 mrg /*
384 1.96 mrg * Start/End VA's of this cpu_info region; we upload the other pages
385 1.96 mrg * in this region that aren't part of the cpu_info to uvm.
386 1.96 mrg */
387 1.96 mrg vaddr_t ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;
388 1.96 mrg
/* Event counters, exported via evcnt(9). */
389 1.96 mrg struct evcnt ci_savefpstate;
390 1.96 mrg struct evcnt ci_savefpstate_null;
391 1.96 mrg struct evcnt ci_xpmsg_mutex_fail;
392 1.96 mrg struct evcnt ci_xpmsg_mutex_fail_call;
393 1.99 macallan struct evcnt ci_xpmsg_mutex_not_held;
394 1.99 macallan struct evcnt ci_xpmsg_bogus;
/* per-level hard & soft interrupt counters */
395 1.96 mrg struct evcnt ci_intrcnt[16];
396 1.96 mrg struct evcnt ci_sintrcnt[16];
397 1.108 ad
398 1.108 ad struct cpu_data ci_data; /* MI per-cpu data */
399 1.110 ryo
400 1.110 ryo #if defined(GPROF) && defined(MULTIPROCESSOR)
401 1.110 ryo struct gmonparam *ci_gmon; /* MI per-cpu GPROF */
402 1.110 ryo #endif
403 1.96 mrg };
404 1.1 deraadt
405 1.105 christos #endif /* _KERNEL || _KMEMUSER */
406 1.105 christos
407 1.105 christos /* Kernel only things. */
408 1.105 christos #if defined(_KERNEL)
409 1.105 christos
410 1.1 deraadt /*
411 1.1 deraadt * definitions of cpu-dependent requirements
412 1.1 deraadt * referenced in generic code
413 1.1 deraadt */
414 1.97 mrg #define cpuinfo (*(struct cpu_info *)CPUINFO_VA)
415 1.39 thorpej #define curcpu() (cpuinfo.ci_self)
416 1.62 thorpej #define curlwp (cpuinfo.ci_curlwp)
417 1.40 thorpej #define CPU_IS_PRIMARY(ci) ((ci)->master)
418 1.37 thorpej
419 1.62 thorpej #define cpu_number() (cpuinfo.ci_cpuid)
420 1.97 mrg
421 1.76 uwe void cpu_proc_fork(struct proc *, struct proc *);
422 1.35 thorpej
423 1.35 thorpej #if defined(MULTIPROCESSOR)
424 1.74 uwe void cpu_boot_secondary_processors(void);
425 1.35 thorpej #endif
426 1.1 deraadt
427 1.1 deraadt /*
428 1.60 pk * Arguments to hardclock, softclock and statclock encapsulate the
429 1.1 deraadt * previous machine state in an opaque clockframe. The ipl is here
430 1.1 deraadt * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
431 1.1 deraadt * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
432 1.1 deraadt */
/* NOTE(review): presumably built by the interrupt entry in locore.s — keep layout in sync (verify). */
433 1.1 deraadt struct clockframe {
434 1.1 deraadt u_int psr; /* psr before interrupt, excluding PSR_ET */
435 1.1 deraadt u_int pc; /* pc at interrupt */
436 1.1 deraadt u_int npc; /* npc at interrupt */
437 1.1 deraadt u_int ipl; /* actual interrupt priority level */
438 1.1 deraadt u_int fp; /* %fp at interrupt */
439 1.1 deraadt };
440 1.3 deraadt typedef struct clockframe clockframe;
441 1.1 deraadt
442 1.1 deraadt extern int eintstack[];
443 1.1 deraadt
/*
 * Clockframe accessors: CLKF_USERMODE is true when the previous PSR had
 * PSR_PS clear (trap came from user mode); CLKF_INTR tests whether the
 * saved %fp lies on the interrupt stack (per-CPU bounds in the MP case).
 */
444 1.1 deraadt #define CLKF_USERMODE(framep) (((framep)->psr & PSR_PS) == 0)
445 1.60 pk #define CLKF_LOPRI(framep,n) (((framep)->psr & PSR_PIL) < (n) << 8)
446 1.1 deraadt #define CLKF_PC(framep) ((framep)->pc)
447 1.30 pk #if defined(MULTIPROCESSOR)
448 1.30 pk #define CLKF_INTR(framep) \
449 1.30 pk ((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE && \
450 1.30 pk (framep)->fp < (u_int)cpuinfo.eintstack)
451 1.30 pk #else
452 1.1 deraadt #define CLKF_INTR(framep) ((framep)->fp < (u_int)eintstack)
453 1.30 pk #endif
454 1.21 pk
455 1.83 ad void sparc_softintr_init(void);
456 1.1 deraadt
457 1.1 deraadt /*
458 1.57 pk * Preempt the current process on the target CPU if in interrupt from
459 1.57 pk * user mode, or after the current trap/syscall if in system mode.
460 1.1 deraadt */
461 1.102 ad #define cpu_need_resched(ci, l, flags) do { \
462 1.95 christos __USE(flags); \
463 1.81 yamt (ci)->ci_want_ast = 1; \
464 1.57 pk \
465 1.57 pk /* Just interrupt the target CPU, so it can notice its AST */ \
466 1.102 ad if ((flags & RESCHED_REMOTE) != 0) \
467 1.57 pk XCALL0(sparc_noop, 1U << (ci)->ci_cpuid); \
468 1.74 uwe } while (/*CONSTCOND*/0)
469 1.1 deraadt
470 1.1 deraadt /*
471 1.1 deraadt * Give a profiling tick to the current process when the user profiling
472 1.21 pk * buffer pages are invalid. On the sparc, request an ast to send us
473 1.1 deraadt * through trap(), marking the proc as needing a profiling tick.
 * (Sets LP_OWEUPC on the lwp and requests an AST on the current CPU.)
474 1.1 deraadt */
475 1.81 yamt #define cpu_need_proftick(l) ((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)
476 1.1 deraadt
477 1.1 deraadt /*
478 1.1 deraadt * Notify the given lwp (l) that it has a signal pending,
479 1.1 deraadt * process as soon as possible.
480 1.1 deraadt */
481 1.78 ad #define cpu_signotify(l) do { \
482 1.85 mrg (l)->l_cpu->ci_want_ast = 1; \
483 1.78 ad \
484 1.78 ad /* Just interrupt the target CPU, so it can notice its AST */ \
485 1.85 mrg if ((l)->l_cpu->ci_cpuid != cpu_number()) \
486 1.85 mrg XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid); \
487 1.78 ad } while (/*CONSTCOND*/0)
488 1.51 pk
489 1.51 pk /* CPU architecture version */
490 1.51 pk extern int cpu_arch;
491 1.1 deraadt
492 1.28 pk /* Number of CPUs in the system */
493 1.72 briggs extern int sparc_ncpus;
494 1.1 deraadt
495 1.91 martin /* Provide %pc of a lwp */
496 1.91 martin #define LWP_PC(l) ((l)->l_md.md_tf->tf_pc)
497 1.91 martin
498 1.109 mrg /* Hardware cross-call mutex */
499 1.109 mrg extern kmutex_t xpmsg_mutex;
500 1.109 mrg
501 1.1 deraadt /*
502 1.1 deraadt * Interrupt handler chains. Interrupt handlers should return 0 for
503 1.1 deraadt * ``not me'' or 1 (``I took care of it''). intr_establish() inserts a
504 1.1 deraadt * handler into the list. The handler is called with its (single)
505 1.1 deraadt * argument, or with a pointer to a clockframe if ih_arg is NULL.
506 1.88 mrg *
507 1.88 mrg * realfun/realarg are used to chain callers, usually with the
508 1.88 mrg * biglock wrapper.
509 1.1 deraadt */
510 1.25 tv extern struct intrhand {
511 1.54 pk int (*ih_fun)(void *); /* handler: returns 0 = ``not me'', 1 = handled (see above) */
512 1.1 deraadt void *ih_arg; /* argument; clockframe ptr is passed if NULL (see above) */
513 1.1 deraadt struct intrhand *ih_next;
514 1.52 pk int ih_classipl; /* NOTE(review): handler's ipl class — verify semantics vs intr.c */
515 1.88 mrg int (*ih_realfun)(void *); /* chained caller, e.g. biglock wrapper (see comment above) */
516 1.88 mrg void *ih_realarg;
517 1.1 deraadt } *intrhand[15];
518 1.1 deraadt
519 1.89 mrg void intr_establish(int, int, struct intrhand *, void (*)(void), bool);
520 1.74 uwe void intr_disestablish(int, struct intrhand *);
521 1.1 deraadt
522 1.54 pk void intr_lock_kernel(void);
523 1.54 pk void intr_unlock_kernel(void);
524 1.19 christos
525 1.19 christos /* disksubr.c */
526 1.19 christos struct dkbad;
527 1.74 uwe int isbad(struct dkbad *, int, int, int);
528 1.74 uwe
529 1.19 christos /* machdep.c */
530 1.80 christos int ldcontrolb(void *);
531 1.80 christos void * reserve_dumppages(void *);
532 1.74 uwe void wcopy(const void *, void *, u_int);
533 1.74 uwe void wzero(void *, u_int);
534 1.74 uwe
535 1.19 christos /* clock.c */
536 1.19 christos struct timeval;
537 1.54 pk void lo_microtime(struct timeval *);
538 1.56 pk void schedintr(void *);
539 1.74 uwe
540 1.19 christos /* locore.s */
541 1.19 christos struct fpstate;
542 1.90 mrg void ipi_savefpstate(struct fpstate *);
543 1.54 pk void savefpstate(struct fpstate *);
544 1.54 pk void loadfpstate(struct fpstate *);
545 1.80 christos int probeget(void *, int);
546 1.54 pk void write_all_windows(void);
547 1.54 pk void write_user_windows(void);
548 1.81 yamt void lwp_trampoline(void);
549 1.19 christos struct pcb;
550 1.54 pk void snapshot(struct pcb *);
551 1.54 pk struct frame *getfp(void);
552 1.80 christos int xldcontrolb(void *, struct pcb *);
553 1.54 pk void copywords(const void *, void *, size_t);
554 1.54 pk void qcopy(const void *, void *, size_t);
555 1.54 pk void qzero(void *, size_t);
556 1.74 uwe
557 1.19 christos /* trap.c */
558 1.92 martin void cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
559 1.62 thorpej int rwindow_save(struct lwp *);
560 1.74 uwe
561 1.19 christos /* cons.c */
562 1.54 pk int cnrom(void);
563 1.74 uwe
564 1.19 christos /* zs.c */
565 1.54 pk void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
566 1.19 christos #ifdef KGDB
567 1.54 pk void zs_kgdb_init(void);
568 1.19 christos #endif
569 1.74 uwe
570 1.19 christos /* fb.c */
571 1.54 pk void fb_unblank(void);
572 1.74 uwe
573 1.19 christos /* kgdb_stub.c */
574 1.19 christos #ifdef KGDB
575 1.54 pk void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
576 1.54 pk void kgdb_connect(int);
577 1.54 pk void kgdb_panic(void);
578 1.19 christos #endif
579 1.74 uwe
580 1.24 pk /* emul.c */
581 1.24 pk struct trapframe;
582 1.94 martin int fixalign(struct lwp *, struct trapframe *, void **);
583 1.54 pk int emulinstr(int, struct trapframe *);
584 1.74 uwe
585 1.28 pk /* cpu.c */
586 1.54 pk void mp_pause_cpus(void);
587 1.54 pk void mp_resume_cpus(void);
588 1.54 pk void mp_halt_cpus(void);
589 1.61 pk #ifdef DDB
590 1.61 pk void mp_pause_cpus_ddb(void);
591 1.61 pk void mp_resume_cpus_ddb(void);
592 1.61 pk #endif
593 1.74 uwe
594 1.63 pk /* intr.c */
595 1.63 pk u_int setitr(u_int);
596 1.63 pk u_int getitr(void);
597 1.4 deraadt
598 1.74 uwe
599 1.4 deraadt /*
600 1.4 deraadt *
601 1.4 deraadt * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
602 1.4 deraadt * of the trap vector table. The next eight bits are supplied by the
603 1.4 deraadt * hardware when the trap occurs, and the bottom four bits are always
604 1.4 deraadt * zero (so that we can shove up to 16 bytes of executable code---exactly
605 1.4 deraadt * four instructions---into each trap vector).
606 1.4 deraadt *
607 1.4 deraadt * The hardware allocates half the trap vectors to hardware and half to
608 1.4 deraadt * software.
609 1.4 deraadt *
610 1.4 deraadt * Traps have priorities assigned (lower number => higher priority).
611 1.4 deraadt */
612 1.4 deraadt
/* One 16-byte trap table entry: exactly four instructions (see TBR description above). */
613 1.4 deraadt struct trapvec {
614 1.4 deraadt int tv_instr[4]; /* the four instructions */
615 1.4 deraadt };
616 1.74 uwe
617 1.21 pk extern struct trapvec *trapbase; /* the 256 vectors */
618 1.11 deraadt
619 1.12 jtc #endif /* _KERNEL */
620 1.1 deraadt #endif /* _CPU_H_ */
621