/*	$NetBSD: cpu.h,v 1.65.22.1 2011/01/07 01:59:40 matt Exp $	*/

/*
 * Copyright (C) 1999 Wolfgang Solfrank.
 * Copyright (C) 1999 TooLs GmbH.
 * Copyright (C) 1995-1997 Wolfgang Solfrank.
 * Copyright (C) 1995-1997 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_POWERPC_CPU_H_
#define	_POWERPC_CPU_H_

struct cache_info {
	int dcache_size;
	int dcache_line_size;
	int icache_size;
	int icache_line_size;
};

#ifdef _KERNEL
#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_ppcarch.h"
#endif

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/intr.h>
#include <sys/device.h>

#include <sys/cpu_data.h>

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	struct device *ci_dev;		/* device of corresponding cpu */
	struct cpu_softc *ci_softc;	/* private cpu info */
	struct lwp *ci_curlwp;		/* current owner of the processor */

	struct pcb *ci_curpcb;
	struct pmap *ci_curpm;
	struct lwp *ci_fpulwp;
	struct lwp *ci_veclwp;
	int ci_cpuid;

	volatile int ci_astpending;
	int ci_want_resched;
	volatile uint64_t ci_lastintr;
	volatile u_long ci_lasttb;
	volatile int ci_tickspending;
	volatile int ci_cpl;
	volatile int ci_iactive;
	volatile int ci_idepth;
#ifndef PPC_BOOKE
	volatile int ci_ipending;
#endif
	int ci_mtx_oldspl;
	int ci_mtx_count;
#ifndef PPC_BOOKE
	char *ci_intstk;
#endif
#ifndef PPC_BOOKE
#define	CPUSAVE_LEN	8
	register_t ci_tempsave[CPUSAVE_LEN];
	register_t ci_ddbsave[CPUSAVE_LEN];
	register_t ci_ipkdbsave[CPUSAVE_LEN];
#define	CPUSAVE_R28	0		/* where r28 gets saved */
#define	CPUSAVE_R29	1		/* where r29 gets saved */
#define	CPUSAVE_R30	2		/* where r30 gets saved */
#define	CPUSAVE_R31	3		/* where r31 gets saved */
#if defined(PPC_IBM4XX)
#define	CPUSAVE_DEAR	4		/* where SPR_DEAR gets saved */
#define	CPUSAVE_ESR	5		/* where SPR_ESR gets saved */
	register_t ci_tlbmisssave[CPUSAVE_LEN];
#else
#define	CPUSAVE_DAR	4		/* where SPR_DAR gets saved */
#define	CPUSAVE_DSISR	5		/* where SPR_DSISR gets saved */
#define	DISISAVE_LEN	4
	register_t ci_disisave[DISISAVE_LEN];
#endif
#define	CPUSAVE_SRR0	6		/* where SRR0 gets saved */
#define	CPUSAVE_SRR1	7		/* where SRR1 gets saved */
#else
#define	CPUSAVE_LEN	128
	register_t ci_savelifo[CPUSAVE_LEN];
	struct pmap_segtab *ci_pmap_segtabs[2];
#define	ci_pmap_kern_segtab	ci_pmap_segtabs[0]
#define	ci_pmap_user_segtab	ci_pmap_segtabs[1]
	struct pmap_tlb_info *ci_tlb_info;
#endif
	struct cache_info ci_ci;
	void *ci_sysmon_cookie;
	void (*ci_idlespin)(void);
	uint32_t ci_khz;
	struct evcnt ci_ev_clock;	/* clock intrs */
	struct evcnt ci_ev_statclock;	/* stat clock */
#ifndef PPC_BOOKE
	struct evcnt ci_ev_softclock;	/* softclock intrs */
	struct evcnt ci_ev_softnet;	/* softnet intrs */
	struct evcnt ci_ev_softserial;	/* softserial intrs */
#endif
	struct evcnt ci_ev_traps;	/* calls to trap() */
	struct evcnt ci_ev_kdsi;	/* kernel DSI traps */
	struct evcnt ci_ev_udsi;	/* user DSI traps */
	struct evcnt ci_ev_udsi_fatal;	/* user DSI trap failures */
	struct evcnt ci_ev_kisi;	/* kernel ISI traps */
	struct evcnt ci_ev_isi;		/* user ISI traps */
	struct evcnt ci_ev_isi_fatal;	/* user ISI trap failures */
	struct evcnt ci_ev_pgm;		/* user PGM traps */
	struct evcnt ci_ev_fpu;		/* FPU traps */
	struct evcnt ci_ev_fpusw;	/* FPU context switch */
	struct evcnt ci_ev_ali;		/* Alignment traps */
	struct evcnt ci_ev_ali_fatal;	/* Alignment fatal trap */
	struct evcnt ci_ev_scalls;	/* system call traps */
	struct evcnt ci_ev_vec;		/* Altivec traps */
	struct evcnt ci_ev_vecsw;	/* Altivec context switches */
	struct evcnt ci_ev_umchk;	/* user MCHK events */
	struct evcnt ci_ev_ipi;		/* IPIs received */
	struct evcnt ci_ev_tlbmiss_soft; /* tlb miss (no trap) */
	struct evcnt ci_ev_dtlbmiss_hard; /* data tlb miss (trap) */
	struct evcnt ci_ev_itlbmiss_hard; /* instruction tlb miss (trap) */
};

#ifdef MULTIPROCESSOR

struct cpu_hatch_data {
	struct device *self;
	struct cpu_info *ci;
	int running;
	int pir;
	int asr;
	int hid0;
	int sdr1;
	int sr[16];
	int batu[4], batl[4];
	int tbu, tbl;
};

static __inline int
cpu_number(void)
{
	int pir;

	__asm ("mfspr %0,1023" : "=r"(pir));
	return pir;
}

void	cpu_boot_secondary_processors(void);


#define	CPU_IS_PRIMARY(ci)	((ci)->ci_cpuid == 0)
#define	CPU_INFO_ITERATOR	int
#define	CPU_INFO_FOREACH(cii, ci)				\
	cii = 0, ci = &cpu_info[0]; cii < ncpu; cii++, ci++

#else

#define	cpu_number()		0

#define	CPU_INFO_ITERATOR	int
#define	CPU_INFO_FOREACH(cii, ci)				\
	cii = 0, ci = curcpu(); ci != NULL; ci = NULL

#endif /* MULTIPROCESSOR */
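
/*
 * Illustrative sketch (not part of the original header): the iterator
 * macros above are meant to be used inside a for statement, the same
 * way as on other NetBSD ports, e.g.
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		... use ci ...
 */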

extern struct cpu_info cpu_info[];

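/*
 * Descriptive note (added): curcpu() returns the per-CPU cpu_info.
 * The machine-dependent startup/hatch code keeps the pointer in SPRG0,
 * which is read here with mfsprg, so the lookup is a single instruction.
 */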
static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile ("mfsprg %0,0" : "=r"(ci));
	return ci;
}

#define	curlwp			(curcpu()->ci_curlwp)
#define	curpcb			(curcpu()->ci_curpcb)
#define	curpm			(curcpu()->ci_curpm)

static __inline register_t
mfmsr(void)
{
	register_t msr;

	__asm volatile ("mfmsr %0" : "=r"(msr));
	return msr;
}

static __inline void
mtmsr(register_t msr)
{
	//KASSERT(msr & PSL_CE);
	//KASSERT(msr & PSL_DE);
	__asm volatile ("mtmsr %0" : : "r"(msr));
}
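
/*
 * Illustrative sketch (not part of the original header): mfmsr()/mtmsr()
 * are commonly paired to block external interrupts around a short
 * critical section.  PSL_EE comes from <machine/psl.h>, included above.
 *
 *	register_t msr = mfmsr();
 *	mtmsr(msr & ~PSL_EE);		-- disable external interrupts
 *	... short critical section ...
 *	mtmsr(msr);			-- restore the saved MSR
 */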

static __inline uint32_t
mftbl(void)
{
	uint32_t tbl;

	__asm volatile (
#ifdef PPC_IBM403
	"	mftblo %0	\n"
#elif defined(PPC_BOOKE)
	"	mfspr %0,268	\n"
#else
	"	mftbl %0	\n"
#endif
	    : "=r" (tbl));

	return tbl;
}

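/*
 * Descriptive note (added): on 32-bit CPUs the 64-bit timebase has to be
 * read in two halves.  The loop below reads the upper half, then the
 * lower, then the upper again, and retries if the upper half changed in
 * between, so the two halves are guaranteed to be consistent.
 */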
static __inline uint64_t
mftb(void)
{
	uint64_t tb;

#ifdef _LP64
	__asm volatile ("mftb %0" : "=r"(tb));
#else
	int tmp;

	__asm volatile (
#ifdef PPC_IBM403
	"1:	mftbhi %0	\n"
	"	mftblo %0+1	\n"
	"	mftbhi %1	\n"
#elif defined(PPC_BOOKE)
	"1:	mfspr %0,269	\n"
	"	mfspr %0+1,268	\n"
	"	mfspr %1,269	\n"
#else
	"1:	mftbu %0	\n"
	"	mftb %0+1	\n"
	"	mftbu %1	\n"
#endif
	"	cmplw %0,%1	\n"
	"	bne- 1b		\n"
	    : "=r" (tb), "=r"(tmp) :: "cr0");
#endif

	return tb;
}

static __inline uint32_t
mfrtcl(void)
{
	uint32_t rtcl;

	__asm volatile ("mfrtcl %0" : "=r"(rtcl));
	return rtcl;
}

static __inline void
mfrtc(uint32_t *rtcp)
{
	uint32_t tmp;

	__asm volatile (
	"1:	mfrtcu %0	\n"
	"	mfrtcl %1	\n"
	"	mfrtcu %2	\n"
	"	cmplw %0,%2	\n"
	"	bne- 1b"
	    : "=r"(*rtcp), "=r"(*(rtcp + 1)), "=r"(tmp) :: "cr0");
}

static __inline uint32_t
mfpvr(void)
{
	uint32_t pvr;

	__asm volatile ("mfpvr %0" : "=r"(pvr));
	return (pvr);
}

static __inline int
cntlzw(uint32_t val)
{
	int cnt;

	__asm volatile ("cntlzw %0,%1" : "=r"(cnt) : "r"(val));
	return (cnt);
}
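
/*
 * Illustrative note (added): cntlzw returns the number of leading zero
 * bits, so for a non-zero word x, 31 - cntlzw(x) is the bit index of its
 * most significant set bit; cntlzw(0) yields 32.
 */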

#if defined(PPC_IBM4XX) || defined(PPC_IBM403)
/*
 * DCR (Device Control Register) access.  These have to be
 * macros because the register address is encoded as an
 * immediate operand.
 */
#define	mtdcr(reg, val)						\
	__asm volatile("mtdcr %0,%1" : : "K"(reg), "r"(val))

#define	mfdcr(reg)						\
	({							\
		uint32_t __val;					\
								\
		__asm volatile("mfdcr %0,%1" : "=r"(__val) : "K"(reg)); \
		__val;						\
	})
#endif /* PPC_IBM4XX || PPC_IBM403 */
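
/*
 * Illustrative sketch (not from the original header): because of the "K"
 * constraint, the DCR number must be a compile-time constant; passing a
 * variable as "reg" will not assemble.  With a hypothetical register
 * number MY_DCR defined as a plain integer literal:
 *
 *	#define MY_DCR	0x0c0		-- hypothetical DCR number
 *	uint32_t v = mfdcr(MY_DCR);
 *	mtdcr(MY_DCR, v | 0x1);
 */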

#define	CLKF_USERMODE(frame)	(((frame)->cf_srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->cf_srr0)
#define	CLKF_INTR(frame)	((frame)->cf_idepth >= 0)
#define	LWP_PC(l)		(trapframe(l)->tf_srr0)


#define	cpu_swapin(p)
#define	cpu_swapout(p)
#define	cpu_proc_fork(p1, p2)
#define	cpu_lwp_free2(l)

extern int powersave;
extern int cpu_timebase;
extern int cpu_printfataltraps;
extern char cpu_model[];

void	cpu_uarea_remap(struct lwp *);
struct	cpu_info *cpu_attach_common(struct device *, int);
void	cpu_setup(struct device *, struct cpu_info *);
void	cpu_identify(char *, size_t);
void	delay (unsigned int);
void	cpu_probe_cache(void);
#ifndef PPC_BOOKE
void	dcache_flush_page(vaddr_t);
void	icache_flush_page(vaddr_t);
void	dcache_flush(vaddr_t, vsize_t);
void	icache_flush(vaddr_t, vsize_t);
#else
void	dcache_wb_page(vaddr_t);
void	dcache_wbinv_page(vaddr_t);
void	dcache_inv_page(vaddr_t);
void	dcache_zero_page(vaddr_t);
void	icache_inv_page(vaddr_t);
void	dcache_wb(vaddr_t, vsize_t);
void	dcache_wbinv(vaddr_t, vsize_t);
void	dcache_inv(vaddr_t, vsize_t);
void	icache_inv(vaddr_t, vsize_t);
#endif
void	*mapiodev(paddr_t, psize_t);
void	unmapiodev(vaddr_t, vsize_t);

#ifdef MULTIPROCESSOR
int	md_setup_trampoline(volatile struct cpu_hatch_data *, struct cpu_info *);
void	md_presync_timebase(volatile struct cpu_hatch_data *);
void	md_start_timebase(volatile struct cpu_hatch_data *);
void	md_sync_timebase(volatile struct cpu_hatch_data *);
void	md_setup_interrupts(void);
int	cpu_spinup(struct device *, struct cpu_info *);
register_t cpu_hatch(void);
void	cpu_spinup_trampoline(void);
#endif

#define	DELAY(n)		delay(n)

#define	cpu_need_resched(ci, v)	(ci->ci_want_resched = ci->ci_astpending = 1)
#define	cpu_did_resched(l)	((void)(curcpu()->ci_want_resched = 0))
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, curcpu()->ci_astpending = 1)
#define	cpu_signotify(l)	(curcpu()->ci_astpending = 1)	/* XXXSMP */

#if !defined(PPC_IBM4XX) && !defined(PPC_BOOKE)
void	oea_init(void (*)(void));
void	oea_startup(const char *);
void	oea_dumpsys(void);
void	oea_install_extint(void (*)(void));
paddr_t	kvtop(void *);
void	softnet(int);

extern paddr_t msgbuf_paddr;
extern int cpu_altivec;
#endif

#endif /* _KERNEL */

/* XXX The below breaks unified pmap on ppc32 */

#if defined(_KERNEL) || defined(_STANDALONE)
#if !defined(CACHELINESIZE)
#ifdef PPC_IBM403
#define	CACHELINESIZE		16
#define	MAXCACHELINESIZE	16
#else
#if defined (PPC_OEA64_BRIDGE)
#define	CACHELINESIZE		128
#define	MAXCACHELINESIZE	128
#else
#define	CACHELINESIZE		32
#define	MAXCACHELINESIZE	32
#endif /* PPC_OEA64_BRIDGE */
#endif
#endif
#endif

void	__syncicache(void *, size_t);
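
/*
 * Illustrative sketch (not from the original header): __syncicache() is
 * used after storing instructions into memory (e.g. setting breakpoints
 * or copying a trampoline) to write back the data cache and invalidate
 * the instruction cache over that range, so the new instructions are
 * actually fetched:
 *
 *	memcpy(dst, code, len);
 *	__syncicache(dst, len);
 */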

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CACHELINE		1
#define	CPU_TIMEBASE		2
#define	CPU_CPUTEMP		3
#define	CPU_PRINTFATALTRAPS	4
#define	CPU_CACHEINFO		5
#define	CPU_ALTIVEC		6
#define	CPU_MODEL		7
#define	CPU_POWERSAVE		8	/* int: use CPU powersave mode */
#define	CPU_BOOTED_DEVICE	9	/* string: device we booted from */
#define	CPU_BOOTED_KERNEL	10	/* string: kernel we booted */
#define	CPU_MAXID		11	/* number of valid machdep ids */
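
/*
 * Illustrative sketch (not from the original header): from userland these
 * ids form the second level under CTL_MACHDEP with sysctl(3), e.g. to
 * read the cache line size:
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_CACHELINE };
 *	int line;
 *	size_t len = sizeof(line);
 *	if (sysctl(mib, 2, &line, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */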

#endif	/* _POWERPC_CPU_H_ */