/*	$NetBSD: cpu.h,v 1.122 2016/06/25 13:52:04 palle Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define CPU_BOOTED_KERNEL   1   /* string: booted kernel name */
#define CPU_BOOTED_DEVICE   2   /* string: device booted from */
#define CPU_BOOT_ARGS       3   /* string: args booted with */
#define CPU_ARCH            4   /* integer: cpu architecture version */
#define CPU_VIS             5   /* 0 - no VIS, 1 - VIS 1.0, etc. */
#define CPU_MAXID           6   /* number of valid machdep ids */
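/*
 * Illustrative userland sketch (not part of this header): the values above
 * are second-level MIB names under CTL_MACHDEP, so the booted kernel name
 * could be read with sysctl(3) roughly as follows.  The buffer size and
 * error handling here are assumptions for the example only.
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */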

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/bus_defs.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#endif
#ifdef SUN4V
#include <machine/hypervisor.h>
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and, by a single locked TTE, at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors reach it through the
 * additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 * cpu_info
 * interrupt stack (all remaining space)
 * idle PCB
 * idle stack (STACKSPACE - sizeof(PCB))
 * 32KB TSB
 */

struct cpu_info {
        struct cpu_data         ci_data;        /* MI per-cpu data */

        /*
         * SPARC cpu_info structures live at two VAs: one global
         * VA (so each CPU can access any other CPU's cpu_info)
         * and an alias VA CPUINFO_VA which is the same on each
         * CPU and maps to that CPU's cpu_info.  Since the alias
         * CPUINFO_VA is how we locate our cpu_info, we have to
         * self-reference the global VA so that we can return it
         * in the curcpu() macro.
         */
        struct cpu_info * volatile ci_self;

        /* Most important fields first */
        struct lwp              *ci_curlwp;
        struct pcb              *ci_cpcb;
        struct cpu_info         *ci_next;

        struct lwp              *ci_fplwp;

        void                    *ci_eintstack;

        int                     ci_mtx_count;
        int                     ci_mtx_oldspl;

        /* Spinning up the CPU */
        void                    (*ci_spinup)(void);
        paddr_t                 ci_paddr;

        int                     ci_cpuid;

        /* CPU PROM information. */
        u_int                   ci_node;

        /* %tick and cpu frequency information */
        u_long                  ci_tick_increment;
        uint64_t                ci_cpu_clockrate[2];    /* %tick */
        uint64_t                ci_system_clockrate[2]; /* %stick */

        /* Interrupts */
        struct intrhand         *ci_intrpending[16];
        struct intrhand         *ci_tick_ih;

        /* Event counters */
        struct evcnt            ci_tick_evcnt;

        /* This could be under MULTIPROCESSOR, but there's no good reason */
        struct evcnt            ci_ipi_evcnt[IPI_EVCNT_NUM];

        int                     ci_flags;
        int                     ci_want_ast;
        int                     ci_want_resched;
        int                     ci_idepth;

        /*
         * A context is simply a small number that differentiates multiple
         * mappings of the same address.  Contexts on the spitfire are 13
         * bits, but could be as large as 17 bits.
         *
         * Each context is either free or attached to a pmap.
         *
         * The context table is an array of pointers to psegs.  Just
         * dereference the right pointer and you get to the pmap segment
         * tables.  These are physical addresses, of course.
         *
         * ci_ctx_lock protects this CPU's context allocation and freeing.
         * These fields are all allocated within (almost) the same cache line.
         */
        kmutex_t                ci_ctx_lock;
        int                     ci_pmap_next_ctx;
        int                     ci_numctx;
        paddr_t                 *ci_ctxbusy;
        LIST_HEAD(, pmap)       ci_pmap_ctxlist;

        /*
         * The TSBs are per cpu too (since MMU context differs between
         * cpus). These are just caches for the TLBs.
         */
        pte_t                   *ci_tsb_dmmu;
        pte_t                   *ci_tsb_immu;

        /* TSB description (sun4v). */
        struct tsb_desc         *ci_tsb_desc;

        /*
         * MMU Fault Status Area (sun4v).
         * Will be initialized to the physical address of the bottom of
         * the interrupt stack.
         */
        paddr_t                 ci_mmfsa;

        /*
         * sun4v mondo control fields
         */
        paddr_t                 ci_cpumq;       /* cpu mondo queue address */
        paddr_t                 ci_devmq;       /* device mondo queue address */
        paddr_t                 ci_cpuset;      /* mondo recipient address */
        paddr_t                 ci_mondo;       /* mondo message address */

        /* probe fault in PCI config space reads */
        bool                    ci_pci_probe;
        bool                    ci_pci_fault;

        volatile void           *ci_ddb_regs;   /* DDB regs */
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define CPUF_PRIMARY    1

/*
 * CPU boot arguments. Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
        u_int   cb_node;        /* PROM CPU node */
        volatile int cb_flags;

        vaddr_t cb_ktext;
        paddr_t cb_ktextp;
        vaddr_t cb_ektext;

        vaddr_t cb_kdata;
        paddr_t cb_kdatap;
        vaddr_t cb_ekdata;

        paddr_t cb_cpuinfo;
        int     cb_cputyp;
};

extern struct cpu_bootargs *cpu_args;

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define curcpu()        (((struct cpu_info *)CPUINFO_VA)->ci_self)
#define cpu_number()    (curcpu()->ci_index)
#define CPU_IS_PRIMARY(ci)      ((ci)->ci_flags & CPUF_PRIMARY)

#define CPU_INFO_ITERATOR               int __unused
#define CPU_INFO_FOREACH(cii, ci)       ci = cpus; ci != NULL; ci = ci->ci_next
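/*
 * Illustrative sketch: the macros above expand to the body of a for loop
 * over the cpus list, so iterating all attached CPUs follows the usual MI
 * pattern ("cii" and "ci" are just local names chosen for the example):
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		printf("cpu node %x, id %d\n", ci->ci_node, ci->ci_cpuid);
 */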

#define curlwp          curcpu()->ci_curlwp
#define fplwp           curcpu()->ci_fplwp
#define curpcb          curcpu()->ci_cpcb

#define want_ast        curcpu()->ci_want_ast
#define want_resched    curcpu()->ci_want_resched

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define cpu_wait(p)     /* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void    cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void    cpu_pmap_prepare(struct cpu_info *, bool);

/* Helper functions to retrieve cache info */
int     cpu_ecache_associativity(int node);
int     cpu_ecache_size(int node);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_dtlb_slots, mp_tramp_itlb_slots;
extern u_long mp_tramp_func;
extern u_long mp_tramp_ci;

void    cpu_hatch(void);
void    cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just this cpu
 * The called functions do not follow the C ABI, so they need to be coded
 * in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void    sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void    sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
extern void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t);

/*
 * Call an arbitrary C function on another cpu (or all others but ourself)
 */
typedef void (*ipi_c_call_func_t)(void*);
void    sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
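/*
 * Illustrative sketch: unlike the raw IPI routines above, which take
 * assembler entry points, sparc64_generic_xcall() accepts an ordinary C
 * function.  "example_fn", "target_ci" and "arg" are hypothetical names
 * used only for this example:
 *
 *	static void
 *	example_fn(void *arg)
 *	{
 *		// runs on the target CPU
 *	}
 *
 *	sparc64_generic_xcall(target_ci, example_fn, arg);
 */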

#endif

/* Provide %pc of a lwp */
#define LWP_PC(l)       ((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
        struct trapframe64 t;
};

#define CLKF_USERMODE(framep)   (((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define CLKF_PC(framep)         ((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define CLKF_INTR(framep)                                       \
        ((!CLKF_USERMODE(framep)) &&                            \
                (((framep)->t.tf_out[6] & 1) ?                  \
                        (((vaddr_t)(framep)->t.tf_out[6] <      \
                                (vaddr_t)EINTSTACK - 0x7ff) &&  \
                        ((vaddr_t)(framep)->t.tf_out[6] >       \
                                (vaddr_t)INTSTACK - 0x7ff)) :   \
                        (((vaddr_t)(framep)->t.tf_out[6] <      \
                                (vaddr_t)EINTSTACK) &&          \
                        ((vaddr_t)(framep)->t.tf_out[6] >       \
                                (vaddr_t)INTSTACK))))

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define cpu_need_proftick(l)    ((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
void cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
        int     (*ih_fun)(void *);
        void    *ih_arg;
        /* if we have to take the biglock, we interpose a wrapper
         * and need to save the original function and arg */
        int     (*ih_realfun)(void *);
        void    *ih_realarg;
        short   ih_number;              /* interrupt number */
                                        /* the H/W provides */
        char    ih_pil;                 /* interrupt priority */
        struct intrhand *ih_next;       /* global list */
        struct intrhand *ih_pending;    /* interrupt queued */
        volatile uint64_t *ih_map;      /* Interrupt map reg */
        volatile uint64_t *ih_clr;      /* clear interrupt reg */
        void    (*ih_ack)(struct intrhand *); /* ack interrupt function */
        bus_space_tag_t ih_bus;         /* parent bus */
        struct evcnt ih_cnt;            /* counter for vmstat */
        uint32_t ih_ivec;
        char ih_name[32];               /* name for the above */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void    intr_establish(int level, bool mpsafe, struct intrhand *);
void    *sparc_softintr_establish(int, int (*)(void *), void *);
void    sparc_softintr_schedule(void *);
void    sparc_softintr_disestablish(void *);
struct intrhand *intrhand_alloc(void);
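/*
 * Illustrative sketch (hypothetical driver names): a typical consumer
 * allocates an intrhand with intrhand_alloc(), fills in the handler,
 * argument and priority, and registers it with intr_establish().
 * "mydev_intr", "sc", "ino" and "pil" are assumptions for the example.
 *
 *	struct intrhand *ih;
 *
 *	ih = intrhand_alloc();
 *	ih->ih_fun = mydev_intr;	// int mydev_intr(void *)
 *	ih->ih_arg = sc;		// passed back to the handler
 *	ih->ih_number = ino;		// H/W interrupt number
 *	ih->ih_pil = pil;		// interrupt priority level
 *	intr_establish(pil, false, ih);	// false: wrapped with the biglock
 */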

/* cpu.c */
int     cpu_myid(void);

/* disksubr.c */
struct dkbad;
int     isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void *  reserve_dumppages(void *);
/* clock.c */
struct timeval;
int     tickintr(void *);       /* level 10/14 (tick) interrupt code */
int     stickintr(void *);      /* system tick interrupt code */
int     stick2eintr(void *);    /* system tick interrupt code */
int     clockintr(void *);      /* level 10 (clock) interrupt code */
int     statintr(void *);       /* level 14 (statclock) interrupt code */
int     schedintr(void *);      /* level 10 (schedclock) interrupt code */
void    tickintr_establish(int, int (*)(void *));
void    stickintr_establish(int, int (*)(void *));
void    stick2eintr_establish(int, int (*)(void *));

/* locore.s */
struct fpstate64;
void    savefpstate(struct fpstate64 *);
void    loadfpstate(struct fpstate64 *);
void    clearfpstate(void);
uint64_t probeget(paddr_t, int, int);
int     probeset(paddr_t, int, int, uint64_t);
void    setcputyp(int);

#define write_all_windows()     __asm volatile("flushw" : : )
#define write_user_windows()    __asm volatile("flushw" : : )

struct pcb;
void    snapshot(struct pcb *);
struct frame *getfp(void);
void    switchtoctx_us(int);
void    switchtoctx_usiii(int);
void    next_tick(long);
void    next_stick(long);
/* trap.c */
void    cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int     rwindow_save(struct lwp *);
/* cons.c */
int     cnrom(void);
/* zs.c */
void    zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void    fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void    kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void    kgdb_connect(int);
void    kgdb_panic(void);
#endif
/* emul.c */
int     fixalign(struct lwp *, struct trapframe64 *);
int     emulinstr(vaddr_t, struct trapframe64 *);

#else /* _KERNEL */

/*
 * XXX: provide some definitions for crash(8), probably can share
 */
#if defined(_KMEMUSER)
#define curcpu()        (((struct cpu_info *)CPUINFO_VA)->ci_self)
#define curlwp          curcpu()->ci_curlwp
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */