1 1.134 riastrad /* $NetBSD: cpu.h,v 1.134 2023/07/13 12:06:20 riastradh Exp $ */
2 1.1 eeh
3 1.1 eeh /*
4 1.1 eeh * Copyright (c) 1992, 1993
5 1.1 eeh * The Regents of the University of California. All rights reserved.
6 1.1 eeh *
7 1.1 eeh * This software was developed by the Computer Systems Engineering group
8 1.1 eeh * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 1.1 eeh * contributed to Berkeley.
10 1.1 eeh *
11 1.1 eeh * All advertising materials mentioning features or use of this software
12 1.1 eeh * must display the following acknowledgement:
13 1.1 eeh * This product includes software developed by the University of
14 1.1 eeh * California, Lawrence Berkeley Laboratory.
15 1.1 eeh *
16 1.1 eeh * Redistribution and use in source and binary forms, with or without
17 1.1 eeh * modification, are permitted provided that the following conditions
18 1.1 eeh * are met:
19 1.1 eeh * 1. Redistributions of source code must retain the above copyright
20 1.1 eeh * notice, this list of conditions and the following disclaimer.
21 1.1 eeh * 2. Redistributions in binary form must reproduce the above copyright
22 1.1 eeh * notice, this list of conditions and the following disclaimer in the
23 1.1 eeh * documentation and/or other materials provided with the distribution.
24 1.36 agc * 3. Neither the name of the University nor the names of its contributors
25 1.1 eeh * may be used to endorse or promote products derived from this software
26 1.1 eeh * without specific prior written permission.
27 1.1 eeh *
28 1.1 eeh * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 1.1 eeh * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 1.1 eeh * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 1.1 eeh * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 1.1 eeh * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 1.1 eeh * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 1.1 eeh * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 1.1 eeh * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 1.1 eeh * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 1.1 eeh * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 1.1 eeh * SUCH DAMAGE.
39 1.1 eeh *
40 1.1 eeh * @(#)cpu.h 8.4 (Berkeley) 1/5/94
41 1.1 eeh */
42 1.1 eeh
43 1.1 eeh #ifndef _CPU_H_
44 1.1 eeh #define _CPU_H_
45 1.1 eeh
46 1.1 eeh /*
47 1.1 eeh * CTL_MACHDEP definitions.
48 1.1 eeh */
49 1.13 eeh #define CPU_BOOTED_KERNEL 1 /* string: booted kernel name */
50 1.33 pk #define CPU_BOOTED_DEVICE 2 /* string: device booted from */
51 1.33 pk #define CPU_BOOT_ARGS 3 /* string: args booted with */
52 1.33 pk #define CPU_ARCH 4 /* integer: cpu architecture version */
53 1.101 macallan #define CPU_VIS 5 /* 0 - no VIS, 1 - VIS 1.0, etc. */
54 1.1 eeh
55 1.124 mrg /*
56 1.124 mrg * This is exported via sysctl for cpuctl(8).
57 1.124 mrg */
58 1.124 mrg struct cacheinfo {
59 1.124 mrg int c_itotalsize;
60 1.124 mrg int c_ilinesize;
61 1.124 mrg int c_dtotalsize;
62 1.124 mrg int c_dlinesize;
63 1.124 mrg int c_etotalsize;
64 1.124 mrg int c_elinesize;
65 1.124 mrg };
66 1.124 mrg
67 1.95 mrg #if defined(_KERNEL) || defined(_KMEMUSER)
68 1.1 eeh /*
69 1.1 eeh * Exported definitions unique to SPARC cpu support.
70 1.1 eeh */
71 1.1 eeh
72 1.37 tsutsui #if defined(_KERNEL_OPT)
73 1.133 ryo #include "opt_gprof.h"
74 1.17 thorpej #include "opt_multiprocessor.h"
75 1.17 thorpej #include "opt_lockdebug.h"
76 1.17 thorpej #endif
77 1.17 thorpej
78 1.1 eeh #include <machine/psl.h>
79 1.1 eeh #include <machine/reg.h>
80 1.74 martin #include <machine/pte.h>
81 1.6 mrg #include <machine/intr.h>
82 1.95 mrg #if defined(_KERNEL)
83 1.122 palle #include <machine/bus_defs.h>
84 1.43 chs #include <machine/cpuset.h>
85 1.96 mrg #include <sparc64/sparc64/intreg.h>
86 1.95 mrg #endif
87 1.112 palle #ifdef SUN4V
88 1.117 nakayama #include <machine/hypervisor.h>
89 1.112 palle #endif
90 1.17 thorpej
91 1.46 yamt #include <sys/cpu_data.h>
92 1.134 riastrad #include <sys/mutex.h>
93 1.75 nakayama #include <sys/evcnt.h>
94 1.95 mrg
95 1.19 eeh /*
96 1.19 eeh * The cpu_info structure is part of a 64KB structure mapped into both the kernel
97 1.19 eeh * pmap and a single locked TTE at CPUINFO_VA for that particular processor.
98 1.19 eeh * Each processor's cpu_info is accessible at CPUINFO_VA only for that
99 1.19 eeh * processor. Other processors can access that through an additional mapping
100 1.19 eeh * in the kernel pmap.
101 1.19 eeh *
102 1.19 eeh * The 64KB page contains:
103 1.19 eeh *
104 1.19 eeh * cpu_info
105 1.19 eeh * interrupt stack (all remaining space)
106 1.19 eeh * idle PCB
107 1.19 eeh * idle stack (STACKSPACE - sizeof(PCB))
108 1.19 eeh * 32KB TSB
109 1.19 eeh */
110 1.19 eeh
111 1.17 thorpej struct cpu_info {
112 1.93 martin struct cpu_data ci_data; /* MI per-cpu data */
113 1.93 martin
114 1.43 chs
115 1.42 petrov /*
116 1.42 petrov * SPARC cpu_info structures live at two VAs: one global
117 1.42 petrov * VA (so each CPU can access any other CPU's cpu_info)
118 1.42 petrov * and an alias VA CPUINFO_VA which is the same on each
119 1.42 petrov * CPU and maps to that CPU's cpu_info. Since the alias
120 1.42 petrov * CPUINFO_VA is how we locate our cpu_info, we have to
121 1.42 petrov * self-reference the global VA so that we can return it
122 1.42 petrov * in the curcpu() macro.
123 1.42 petrov */
124 1.50 perry struct cpu_info * volatile ci_self;
125 1.42 petrov
126 1.20 eeh /* Most important fields first */
127 1.34 thorpej struct lwp *ci_curlwp;
128 1.128 ad struct lwp *ci_onproc; /* current user LWP / kthread */
129 1.32 chs struct pcb *ci_cpcb;
130 1.19 eeh struct cpu_info *ci_next;
131 1.20 eeh
132 1.34 thorpej struct lwp *ci_fplwp;
133 1.51 cdi
134 1.51 cdi void *ci_eintstack;
135 1.51 cdi
136 1.60 ad int ci_mtx_count;
137 1.60 ad int ci_mtx_oldspl;
138 1.60 ad
139 1.51 cdi /* Spinning up the CPU */
140 1.53 cdi void (*ci_spinup)(void);
141 1.51 cdi paddr_t ci_paddr;
142 1.51 cdi
143 1.38 petrov int ci_cpuid;
144 1.20 eeh
145 1.124 mrg uint64_t ci_ver;
146 1.124 mrg
147 1.42 petrov /* CPU PROM information. */
148 1.42 petrov u_int ci_node;
149 1.124 mrg const char *ci_name;
150 1.124 mrg
151 1.124 mrg /* This is for sysctl. */
152 1.124 mrg struct cacheinfo ci_cacheinfo;
153 1.42 petrov
154 1.65 martin /* %tick and cpu frequency information */
155 1.65 martin u_long ci_tick_increment;
156 1.99 macallan uint64_t ci_cpu_clockrate[2]; /* %tick */
157 1.99 macallan uint64_t ci_system_clockrate[2]; /* %stick */
158 1.65 martin
159 1.75 nakayama /* Interrupts */
160 1.75 nakayama struct intrhand *ci_intrpending[16];
161 1.77 nakayama struct intrhand *ci_tick_ih;
162 1.76 nakayama
163 1.76 nakayama /* Event counters */
164 1.75 nakayama struct evcnt ci_tick_evcnt;
165 1.89 mrg
166 1.89 mrg /* This could be under MULTIPROCESSOR, but there's no good reason */
167 1.76 nakayama struct evcnt ci_ipi_evcnt[IPI_EVCNT_NUM];
168 1.75 nakayama
169 1.42 petrov int ci_flags;
170 1.42 petrov int ci_want_ast;
171 1.42 petrov int ci_want_resched;
172 1.68 martin int ci_idepth;
173 1.42 petrov
174 1.74 martin /*
175 1.74 martin * A context is simply a small number that differentiates multiple mappings
176 1.74 martin * of the same address. Contexts on the spitfire are 13 bits, but could
177 1.74 martin * be as large as 17 bits.
178 1.74 martin *
179 1.74 martin * Each context is either free or attached to a pmap.
180 1.74 martin *
181 1.74 martin * The context table is an array of pointers to psegs. Just dereference
182 1.74 martin * the right pointer and you get to the pmap segment tables. These are
183 1.74 martin * physical addresses, of course.
184 1.74 martin *
185 1.90 mrg * ci_ctx_lock protects this CPUs context allocation/free.
186 1.90 mrg * These are all allocated almost with in the same cacheline.
187 1.74 martin */
188 1.90 mrg kmutex_t ci_ctx_lock;
189 1.74 martin int ci_pmap_next_ctx;
190 1.84 nakayama int ci_numctx;
191 1.74 martin paddr_t *ci_ctxbusy;
192 1.74 martin LIST_HEAD(, pmap) ci_pmap_ctxlist;
193 1.74 martin
194 1.74 martin /*
195 1.74 martin * The TSBs are per cpu too (since MMU context differs between
196 1.74 martin * cpus). These are just caches for the TLBs.
197 1.74 martin */
198 1.74 martin pte_t *ci_tsb_dmmu;
199 1.74 martin pte_t *ci_tsb_immu;
200 1.74 martin
201 1.112 palle /* TSB description (sun4v). */
202 1.112 palle struct tsb_desc *ci_tsb_desc;
203 1.132 nakayama
204 1.109 palle /* MMU Fault Status Area (sun4v).
205 1.109 palle * Will be initialized to the physical address of the bottom of
206 1.109 palle * the interrupt stack.
207 1.109 palle */
208 1.123 palle paddr_t ci_mmufsa;
209 1.109 palle
210 1.111 palle /*
211 1.111 palle * sun4v mondo control fields
212 1.111 palle */
213 1.111 palle paddr_t ci_cpumq; /* cpu mondo queue address */
214 1.111 palle paddr_t ci_devmq; /* device mondo queue address */
215 1.111 palle paddr_t ci_cpuset; /* mondo recipient address */
216 1.111 palle paddr_t ci_mondo; /* mondo message address */
217 1.131 palle
218 1.102 nakayama /* probe fault in PCI config space reads */
219 1.102 nakayama bool ci_pci_probe;
220 1.102 nakayama bool ci_pci_fault;
221 1.102 nakayama
222 1.55 mrg volatile void *ci_ddb_regs; /* DDB regs */
223 1.131 palle
224 1.131 palle void (*ci_idlespin)(void);
225 1.133 ryo
226 1.133 ryo #if defined(GPROF) && defined(MULTIPROCESSOR)
227 1.133 ryo struct gmonparam *ci_gmon; /* MI per-cpu GPROF */
228 1.133 ryo #endif
229 1.17 thorpej };
230 1.17 thorpej
231 1.95 mrg #endif /* _KERNEL || _KMEMUSER */
232 1.95 mrg
233 1.95 mrg #ifdef _KERNEL
234 1.95 mrg
235 1.42 petrov #define CPUF_PRIMARY 1
236 1.42 petrov
237 1.42 petrov /*
238 1.42 petrov * CPU boot arguments. Used by secondary CPUs at the bootstrap time.
239 1.42 petrov */
240 1.42 petrov struct cpu_bootargs {
241 1.42 petrov u_int cb_node; /* PROM CPU node */
242 1.50 perry volatile int cb_flags;
243 1.42 petrov
244 1.42 petrov vaddr_t cb_ktext;
245 1.42 petrov paddr_t cb_ktextp;
246 1.42 petrov vaddr_t cb_ektext;
247 1.42 petrov
248 1.42 petrov vaddr_t cb_kdata;
249 1.42 petrov paddr_t cb_kdatap;
250 1.42 petrov vaddr_t cb_ekdata;
251 1.42 petrov
252 1.42 petrov paddr_t cb_cpuinfo;
253 1.113 palle int cb_cputyp;
254 1.42 petrov };
255 1.42 petrov
256 1.42 petrov extern struct cpu_bootargs *cpu_args;
257 1.42 petrov
258 1.89 mrg #if defined(MULTIPROCESSOR)
259 1.47 briggs extern int sparc_ncpus;
260 1.89 mrg #else
261 1.89 mrg #define sparc_ncpus 1
262 1.89 mrg #endif
263 1.89 mrg
264 1.19 eeh extern struct cpu_info *cpus;
265 1.83 nakayama extern struct pool_cache *fpstate_cache;
266 1.17 thorpej
267 1.129 martin /* CURCPU_INT() a local (per CPU) view of our cpu_info */
268 1.129 martin #define CURCPU_INT() ((struct cpu_info *)CPUINFO_VA)
269 1.129 martin /* in general we prefer the globaly visible pointer */
270 1.129 martin #define curcpu() (CURCPU_INT()->ci_self)
271 1.66 martin #define cpu_number() (curcpu()->ci_index)
272 1.42 petrov #define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY)
273 1.42 petrov
274 1.105 christos #define CPU_INFO_ITERATOR int __unused
275 1.105 christos #define CPU_INFO_FOREACH(cii, ci) ci = cpus; ci != NULL; ci = ci->ci_next
276 1.43 chs
277 1.129 martin /* these are only valid on the local cpu */
278 1.129 martin #define curlwp CURCPU_INT()->ci_curlwp
279 1.129 martin #define fplwp CURCPU_INT()->ci_fplwp
280 1.129 martin #define curpcb CURCPU_INT()->ci_cpcb
281 1.129 martin #define want_ast CURCPU_INT()->ci_want_ast
282 1.42 petrov
283 1.1 eeh /*
284 1.1 eeh * definitions of cpu-dependent requirements
285 1.1 eeh * referenced in generic code
286 1.1 eeh */
287 1.42 petrov #define cpu_wait(p) /* nothing */
288 1.48 martin void cpu_proc_fork(struct proc *, struct proc *);
289 1.38 petrov
290 1.74 martin /* run on the cpu itself */
291 1.74 martin void cpu_pmap_init(struct cpu_info *);
292 1.74 martin /* run upfront to prepare the cpu_info */
293 1.74 martin void cpu_pmap_prepare(struct cpu_info *, bool);
294 1.74 martin
295 1.118 palle /* Helper functions to retrieve cache info */
296 1.118 palle int cpu_ecache_associativity(int node);
297 1.118 palle int cpu_ecache_size(int node);
298 1.118 palle
299 1.38 petrov #if defined(MULTIPROCESSOR)
300 1.51 cdi extern vaddr_t cpu_spinup_trampoline;
301 1.51 cdi
302 1.51 cdi extern char *mp_tramp_code;
303 1.51 cdi extern u_long mp_tramp_code_len;
304 1.115 martin extern u_long mp_tramp_dtlb_slots, mp_tramp_itlb_slots;
305 1.51 cdi extern u_long mp_tramp_func;
306 1.51 cdi extern u_long mp_tramp_ci;
307 1.51 cdi
308 1.53 cdi void cpu_hatch(void);
309 1.53 cdi void cpu_boot_secondary_processors(void);
310 1.57 martin
311 1.57 martin /*
312 1.57 martin * Call a function on other cpus:
313 1.69 martin * multicast - send to everyone in the sparc64_cpuset_t
314 1.57 martin * broadcast - send to to all cpus but ourselves
315 1.57 martin * send - send to just this cpu
316 1.92 martin * The called function do not follow the C ABI, so need to be coded in
317 1.92 martin * assembler.
318 1.57 martin */
319 1.91 martin typedef void (* ipifunc_t)(void *, void *);
320 1.57 martin
321 1.76 nakayama void sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
322 1.76 nakayama void sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
323 1.114 palle extern void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t);
324 1.92 martin
325 1.92 martin /*
326 1.92 martin * Call an arbitrary C function on another cpu (or all others but ourself)
327 1.92 martin */
328 1.92 martin typedef void (*ipi_c_call_func_t)(void*);
329 1.92 martin void sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
330 1.92 martin
331 1.38 petrov #endif
332 1.35 nakayama
333 1.94 martin /* Provide %pc of a lwp */
334 1.94 martin #define LWP_PC(l) ((l)->l_md.md_tf->tf_pc)
335 1.94 martin
336 1.1 eeh /*
337 1.1 eeh * Arguments to hardclock, softclock and gatherstats encapsulate the
338 1.1 eeh * previous machine state in an opaque clockframe. The ipl is here
339 1.1 eeh * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
340 1.1 eeh * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
341 1.1 eeh */
342 1.1 eeh struct clockframe {
343 1.14 eeh struct trapframe64 t;
344 1.1 eeh };
345 1.1 eeh
346 1.1 eeh #define CLKF_USERMODE(framep) (((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
347 1.1 eeh #define CLKF_PC(framep) ((framep)->t.tf_pc)
348 1.30 eeh /* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
349 1.30 eeh #define CLKF_INTR(framep) \
350 1.30 eeh ((!CLKF_USERMODE(framep))&& \
351 1.30 eeh (((framep)->t.tf_out[6] & 1 ) ? \
352 1.30 eeh (((vaddr_t)(framep)->t.tf_out[6] < \
353 1.30 eeh (vaddr_t)EINTSTACK-0x7ff) && \
354 1.30 eeh ((vaddr_t)(framep)->t.tf_out[6] > \
355 1.30 eeh (vaddr_t)INTSTACK-0x7ff)) : \
356 1.30 eeh (((vaddr_t)(framep)->t.tf_out[6] < \
357 1.30 eeh (vaddr_t)EINTSTACK) && \
358 1.30 eeh ((vaddr_t)(framep)->t.tf_out[6] > \
359 1.30 eeh (vaddr_t)INTSTACK))))
360 1.1 eeh
361 1.1 eeh /*
362 1.1 eeh * Give a profiling tick to the current process when the user profiling
363 1.1 eeh * buffer pages are invalid. On the sparc, request an ast to send us
364 1.1 eeh * through trap(), marking the proc as needing a profiling tick.
365 1.1 eeh */
366 1.60 ad #define cpu_need_proftick(l) ((l)->l_pflag |= LP_OWEUPC, want_ast = 1)
367 1.1 eeh
368 1.1 eeh /*
369 1.78 nakayama * Notify an LWP that it has a signal pending, process as soon as possible.
370 1.1 eeh */
371 1.78 nakayama void cpu_signotify(struct lwp *);
372 1.1 eeh
373 1.121 palle
374 1.1 eeh /*
375 1.1 eeh * Interrupt handler chains. Interrupt handlers should return 0 for
376 1.1 eeh * ``not me'' or 1 (``I took care of it''). intr_establish() inserts a
377 1.1 eeh * handler into the list. The handler is called with its (single)
378 1.1 eeh * argument, or with a pointer to a clockframe if ih_arg is NULL.
379 1.1 eeh */
380 1.1 eeh struct intrhand {
381 1.53 cdi int (*ih_fun)(void *);
382 1.18 mrg void *ih_arg;
383 1.70 martin /* if we have to take the biglock, we interpose a wrapper
384 1.70 martin * and need to save the original function and arg */
385 1.70 martin int (*ih_realfun)(void *);
386 1.70 martin void *ih_realarg;
387 1.18 mrg short ih_number; /* interrupt number */
388 1.18 mrg /* the H/W provides */
389 1.23 eeh char ih_pil; /* interrupt priority */
390 1.21 eeh struct intrhand *ih_next; /* global list */
391 1.26 eeh struct intrhand *ih_pending; /* interrupt queued */
392 1.52 cdi volatile uint64_t *ih_map; /* Interrupt map reg */
393 1.52 cdi volatile uint64_t *ih_clr; /* clear interrupt reg */
394 1.119 palle void (*ih_ack)(struct intrhand *); /* ack interrupt function */
395 1.120 palle bus_space_tag_t ih_bus; /* parent bus */
396 1.97 macallan struct evcnt ih_cnt; /* counter for vmstat */
397 1.97 macallan uint32_t ih_ivec;
398 1.97 macallan char ih_name[32]; /* name for the above */
399 1.1 eeh };
400 1.29 mrg extern struct intrhand *intrhand[];
401 1.1 eeh extern struct intrhand *intrlev[MAXINTNUM];
402 1.1 eeh
403 1.81 martin void intr_establish(int level, bool mpsafe, struct intrhand *);
404 1.80 ad void *sparc_softintr_establish(int, int (*)(void *), void *);
405 1.80 ad void sparc_softintr_schedule(void *);
406 1.80 ad void sparc_softintr_disestablish(void *);
407 1.119 palle struct intrhand *intrhand_alloc(void);
408 1.42 petrov
409 1.106 palle /* cpu.c */
410 1.106 palle int cpu_myid(void);
411 1.106 palle
412 1.1 eeh /* disksubr.c */
413 1.1 eeh struct dkbad;
414 1.53 cdi int isbad(struct dkbad *bt, int, int, int);
415 1.1 eeh /* machdep.c */
416 1.62 christos void * reserve_dumppages(void *);
417 1.1 eeh /* clock.c */
418 1.1 eeh struct timeval;
419 1.76 nakayama int tickintr(void *); /* level 10/14 (tick) interrupt code */
420 1.99 macallan int stickintr(void *); /* system tick interrupt code */
421 1.103 macallan int stick2eintr(void *); /* system tick interrupt code */
422 1.53 cdi int clockintr(void *); /* level 10 (clock) interrupt code */
423 1.53 cdi int statintr(void *); /* level 14 (statclock) interrupt code */
424 1.77 nakayama int schedintr(void *); /* level 10 (schedclock) interrupt code */
425 1.76 nakayama void tickintr_establish(int, int (*)(void *));
426 1.99 macallan void stickintr_establish(int, int (*)(void *));
427 1.103 macallan void stick2eintr_establish(int, int (*)(void *));
428 1.103 macallan
429 1.1 eeh /* locore.s */
430 1.14 eeh struct fpstate64;
431 1.53 cdi void savefpstate(struct fpstate64 *);
432 1.53 cdi void loadfpstate(struct fpstate64 *);
433 1.56 martin void clearfpstate(void);
434 1.53 cdi uint64_t probeget(paddr_t, int, int);
435 1.53 cdi int probeset(paddr_t, int, int, uint64_t);
436 1.110 palle void setcputyp(int);
437 1.42 petrov
438 1.50 perry #define write_all_windows() __asm volatile("flushw" : : )
439 1.50 perry #define write_user_windows() __asm volatile("flushw" : : )
440 1.42 petrov
441 1.1 eeh struct pcb;
442 1.53 cdi void snapshot(struct pcb *);
443 1.53 cdi struct frame *getfp(void);
444 1.88 mrg void switchtoctx_us(int);
445 1.88 mrg void switchtoctx_usiii(int);
446 1.85 nakayama void next_tick(long);
447 1.99 macallan void next_stick(long);
448 1.126 palle void next_stick_init(void);
449 1.1 eeh /* trap.c */
450 1.98 martin void cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
451 1.53 cdi int rwindow_save(struct lwp *);
452 1.1 eeh /* cons.c */
453 1.53 cdi int cnrom(void);
454 1.1 eeh /* zs.c */
455 1.53 cdi void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
456 1.1 eeh /* fb.c */
457 1.53 cdi void fb_unblank(void);
458 1.1 eeh /* kgdb_stub.c */
459 1.1 eeh #ifdef KGDB
460 1.53 cdi void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
461 1.53 cdi void kgdb_connect(int);
462 1.53 cdi void kgdb_panic(void);
463 1.1 eeh #endif
464 1.5 mrg /* emul.c */
465 1.53 cdi int fixalign(struct lwp *, struct trapframe64 *);
466 1.53 cdi int emulinstr(vaddr_t, struct trapframe64 *);
467 1.1 eeh
468 1.1 eeh #endif /* _KERNEL */
469 1.1 eeh #endif /* _CPU_H_ */
470