/*	$NetBSD: cpufunc.h,v 1.37 2005/12/28 19:09:29 perry Exp $	*/

/*
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.h
 *
 * Prototypes for cpu, mmu and tlb related functions.
 */

#ifndef _ARM32_CPUFUNC_H_
#define _ARM32_CPUFUNC_H_

#ifdef _KERNEL

#include <sys/types.h>
#include <arm/cpuconf.h>

struct cpu_functions {

	/* CPU functions */

	u_int	(*cf_id)		__P((void));
	void	(*cf_cpwait)		__P((void));

	/* MMU functions */

	u_int	(*cf_control)		__P((u_int, u_int));
	void	(*cf_domains)		__P((u_int));
	void	(*cf_setttb)		__P((u_int));
	u_int	(*cf_faultstatus)	__P((void));
	u_int	(*cf_faultaddress)	__P((void));

	/* TLB functions */

	void	(*cf_tlb_flushID)	__P((void));
	void	(*cf_tlb_flushID_SE)	__P((u_int));
	void	(*cf_tlb_flushI)	__P((void));
	void	(*cf_tlb_flushI_SE)	__P((u_int));
	void	(*cf_tlb_flushD)	__P((void));
	void	(*cf_tlb_flushD_SE)	__P((u_int));

	/*
	 * Cache operations:
	 *
	 * We define the following primitives:
	 *
	 *	icache_sync_all		Synchronize I-cache
	 *	icache_sync_range	Synchronize I-cache range
	 *
	 *	dcache_wbinv_all	Write-back and Invalidate D-cache
	 *	dcache_wbinv_range	Write-back and Invalidate D-cache range
	 *	dcache_inv_range	Invalidate D-cache range
	 *	dcache_wb_range		Write-back D-cache range
	 *
	 *	idcache_wbinv_all	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache
	 *	idcache_wbinv_range	Write-back and Invalidate D-cache,
	 *				Invalidate I-cache range
	 *
	 * Note that the ARM term for "write-back" is "clean".  We use
	 * the term "write-back" since it's a more common way to describe
	 * the operation.
	 *
	 * There are some rules that must be followed:
	 *
	 *	I-cache Synch (all or range):
	 *		The goal is to synchronize the instruction stream,
	 *		so you may need to write-back dirty D-cache blocks
	 *		first.  If a range is requested, and you can't
	 *		synchronize just a range, you have to hit the whole
	 *		thing.
	 *
	 *	D-cache Write-Back and Invalidate range:
	 *		If you can't WB-Inv a range, you must WB-Inv the
	 *		entire D-cache.
	 *
	 *	D-cache Invalidate:
	 *		If you can't Inv the D-cache, you must Write-Back
	 *		and Invalidate.  Code that uses this operation
	 *		MUST NOT assume that the D-cache will not be written
	 *		back to memory.
	 *
	 *	D-cache Write-Back:
	 *		If you can't Write-back without doing an Inv,
	 *		that's fine.  Then treat this as a WB-Inv.
	 *		Skipping the invalidate is merely an optimization.
	 *
	 *	All operations:
	 *		Valid virtual addresses must be passed to each
	 *		cache operation.
	 *
	 * A caller-side usage sketch follows the cpu_*cache_* macros below.
	 */
	void	(*cf_icache_sync_all)	__P((void));
	void	(*cf_icache_sync_range)	__P((vaddr_t, vsize_t));

	void	(*cf_dcache_wbinv_all)	__P((void));
	void	(*cf_dcache_wbinv_range) __P((vaddr_t, vsize_t));
	void	(*cf_dcache_inv_range)	__P((vaddr_t, vsize_t));
	void	(*cf_dcache_wb_range)	__P((vaddr_t, vsize_t));

	void	(*cf_idcache_wbinv_all)	__P((void));
	void	(*cf_idcache_wbinv_range) __P((vaddr_t, vsize_t));

	/* Other functions */

	void	(*cf_flush_prefetchbuf)	__P((void));
	void	(*cf_drain_writebuf)	__P((void));
	void	(*cf_flush_brnchtgt_C)	__P((void));
	void	(*cf_flush_brnchtgt_E)	__P((u_int));

	void	(*cf_sleep)		__P((int mode));

	/* Soft functions */

	int	(*cf_dataabt_fixup)	__P((void *));
	int	(*cf_prefetchabt_fixup)	__P((void *));

	void	(*cf_context_switch)	__P((void));

	void	(*cf_setup)		__P((char *));
};

extern struct cpu_functions cpufuncs;
extern u_int cputype;

#define	cpu_id()		cpufuncs.cf_id()
#define	cpu_cpwait()		cpufuncs.cf_cpwait()

#define	cpu_control(c, e)	cpufuncs.cf_control(c, e)
#define	cpu_domains(d)		cpufuncs.cf_domains(d)
#define	cpu_setttb(t)		cpufuncs.cf_setttb(t)
#define	cpu_faultstatus()	cpufuncs.cf_faultstatus()
#define	cpu_faultaddress()	cpufuncs.cf_faultaddress()

#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)	cpufuncs.cf_tlb_flushID_SE(e)
#define	cpu_tlb_flushI()	cpufuncs.cf_tlb_flushI()
#define	cpu_tlb_flushI_SE(e)	cpufuncs.cf_tlb_flushI_SE(e)
#define	cpu_tlb_flushD()	cpufuncs.cf_tlb_flushD()
#define	cpu_tlb_flushD_SE(e)	cpufuncs.cf_tlb_flushD_SE(e)

#define	cpu_icache_sync_all()	cpufuncs.cf_icache_sync_all()
#define	cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))

#define	cpu_dcache_wbinv_all()	cpufuncs.cf_dcache_wbinv_all()
#define	cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_all()	cpufuncs.cf_idcache_wbinv_all()
#define	cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
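
/*
 * Illustrative usage sketch (an assumption added for clarity, not part of
 * the original interface; the function name is hypothetical and the block
 * is never compiled): after the kernel stores newly generated instructions
 * into a range, a single call resynchronizes the instruction stream.  Per
 * the rules above, the back-end primitive writes back any dirty D-cache
 * blocks covering the range before the I-cache is synchronized.
 */
#if 0	/* example only */
static __inline void
example_sync_new_code(vaddr_t va, vsize_t len)
{

	/* 'va' must be a valid virtual address mapping the new code. */
	cpu_icache_sync_range(va, len);
}
#endif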

#define	cpu_flush_prefetchbuf()	cpufuncs.cf_flush_prefetchbuf()
#define	cpu_drain_writebuf()	cpufuncs.cf_drain_writebuf()
#define	cpu_flush_brnchtgt_C()	cpufuncs.cf_flush_brnchtgt_C()
#define	cpu_flush_brnchtgt_E(e)	cpufuncs.cf_flush_brnchtgt_E(e)

#define	cpu_sleep(m)		cpufuncs.cf_sleep(m)

#define	cpu_dataabt_fixup(a)		cpufuncs.cf_dataabt_fixup(a)
#define	cpu_prefetchabt_fixup(a)	cpufuncs.cf_prefetchabt_fixup(a)
#define	ABORT_FIXUP_OK		0	/* fixup succeeded */
#define	ABORT_FIXUP_FAILED	1	/* fixup failed */
#define	ABORT_FIXUP_RETURN	2	/* abort handler should return */

#define	cpu_setup(a)			cpufuncs.cf_setup(a)

int	set_cpufuncs		__P((void));
#define	ARCHITECTURE_NOT_PRESENT	1	/* known but not configured */
#define	ARCHITECTURE_NOT_SUPPORTED	2	/* not known */

void	cpufunc_nullop		__P((void));
int	cpufunc_null_fixup	__P((void *));
int	early_abort_fixup	__P((void *));
int	late_abort_fixup	__P((void *));
u_int	cpufunc_id		__P((void));
u_int	cpufunc_control		__P((u_int, u_int));
void	cpufunc_domains		__P((u_int));
u_int	cpufunc_faultstatus	__P((void));
u_int	cpufunc_faultaddress	__P((void));

#ifdef CPU_ARM3
u_int	arm3_control		__P((u_int, u_int));
void	arm3_cache_flush	__P((void));
#endif	/* CPU_ARM3 */

#if defined(CPU_ARM6) || defined(CPU_ARM7)
void	arm67_setttb		__P((u_int));
void	arm67_tlb_flush		__P((void));
void	arm67_tlb_purge		__P((u_int));
void	arm67_cache_flush	__P((void));
void	arm67_context_switch	__P((void));
#endif	/* CPU_ARM6 || CPU_ARM7 */

#ifdef CPU_ARM6
void	arm6_setup		__P((char *));
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
void	arm7_setup		__P((char *));
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
int	arm7_dataabt_fixup	__P((void *));
void	arm7tdmi_setup		__P((char *));
void	arm7tdmi_setttb		__P((u_int));
void	arm7tdmi_tlb_flushID	__P((void));
void	arm7tdmi_tlb_flushID_SE	__P((u_int));
void	arm7tdmi_cache_flushID	__P((void));
void	arm7tdmi_context_switch	__P((void));
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
void	arm8_setttb		__P((u_int));
void	arm8_tlb_flushID	__P((void));
void	arm8_tlb_flushID_SE	__P((u_int));
void	arm8_cache_flushID	__P((void));
void	arm8_cache_flushID_E	__P((u_int));
void	arm8_cache_cleanID	__P((void));
void	arm8_cache_cleanID_E	__P((u_int));
void	arm8_cache_purgeID	__P((void));
void	arm8_cache_purgeID_E	__P((u_int entry));

void	arm8_cache_syncI	__P((void));
void	arm8_cache_cleanID_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_cleanD_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_purgeID_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_purgeD_rng	__P((vaddr_t, vsize_t));
void	arm8_cache_syncI_rng	__P((vaddr_t, vsize_t));

void	arm8_context_switch	__P((void));

void	arm8_setup		__P((char *));

u_int	arm8_clock_config	__P((u_int, u_int));
#endif

#ifdef CPU_SA110
void	sa110_setup		__P((char *));
void	sa110_context_switch	__P((void));
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa11x0_drain_readbuf	__P((void));

void	sa11x0_context_switch	__P((void));
void	sa11x0_cpu_sleep	__P((int));

void	sa11x0_setup		__P((char *));
#endif

#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
void	sa1_setttb		__P((u_int));

void	sa1_tlb_flushID_SE	__P((u_int));

void	sa1_cache_flushID	__P((void));
void	sa1_cache_flushI	__P((void));
void	sa1_cache_flushD	__P((void));
void	sa1_cache_flushD_SE	__P((u_int));

void	sa1_cache_cleanID	__P((void));
void	sa1_cache_cleanD	__P((void));
void	sa1_cache_cleanD_E	__P((u_int));

void	sa1_cache_purgeID	__P((void));
void	sa1_cache_purgeID_E	__P((u_int));
void	sa1_cache_purgeD	__P((void));
void	sa1_cache_purgeD_E	__P((u_int));

void	sa1_cache_syncI		__P((void));
void	sa1_cache_cleanID_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_cleanD_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_purgeID_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_purgeD_rng	__P((vaddr_t, vsize_t));
void	sa1_cache_syncI_rng	__P((vaddr_t, vsize_t));

#endif

#ifdef CPU_ARM9
void	arm9_setttb		__P((u_int));

void	arm9_tlb_flushID_SE	__P((u_int));

void	arm9_icache_sync_all	__P((void));
void	arm9_icache_sync_range	__P((vaddr_t, vsize_t));

void	arm9_dcache_wbinv_all	__P((void));
void	arm9_dcache_wbinv_range __P((vaddr_t, vsize_t));
void	arm9_dcache_inv_range	__P((vaddr_t, vsize_t));
void	arm9_dcache_wb_range	__P((vaddr_t, vsize_t));

void	arm9_idcache_wbinv_all	__P((void));
void	arm9_idcache_wbinv_range __P((vaddr_t, vsize_t));

void	arm9_context_switch	__P((void));

void	arm9_setup		__P((char *));

extern unsigned arm9_dcache_sets_max;
extern unsigned arm9_dcache_sets_inc;
extern unsigned arm9_dcache_index_max;
extern unsigned arm9_dcache_index_inc;
#endif

#ifdef CPU_ARM10
void	arm10_setttb		__P((u_int));

void	arm10_tlb_flushID_SE	__P((u_int));
void	arm10_tlb_flushI_SE	__P((u_int));

void	arm10_context_switch	__P((void));

void	arm10_setup		__P((char *));
#endif

#ifdef CPU_ARM11
void	arm11_setttb		__P((u_int));

void	arm11_tlb_flushID_SE	__P((u_int));
void	arm11_tlb_flushI_SE	__P((u_int));

void	arm11_context_switch	__P((void));

void	arm11_setup		__P((char *string));
void	arm11_tlb_flushID	__P((void));
void	arm11_tlb_flushI	__P((void));
void	arm11_tlb_flushD	__P((void));
void	arm11_tlb_flushD_SE	__P((u_int va));

void	arm11_drain_writebuf	__P((void));
#endif

#if defined (CPU_ARM10) || defined (CPU_ARM11)
void	armv5_icache_sync_all	__P((void));
void	armv5_icache_sync_range	__P((vaddr_t, vsize_t));

void	armv5_dcache_wbinv_all	__P((void));
void	armv5_dcache_wbinv_range __P((vaddr_t, vsize_t));
void	armv5_dcache_inv_range	__P((vaddr_t, vsize_t));
void	armv5_dcache_wb_range	__P((vaddr_t, vsize_t));

void	armv5_idcache_wbinv_all	__P((void));
void	armv5_idcache_wbinv_range __P((vaddr_t, vsize_t));

extern unsigned armv5_dcache_sets_max;
extern unsigned armv5_dcache_sets_inc;
extern unsigned armv5_dcache_index_max;
extern unsigned armv5_dcache_index_inc;
#endif

#if defined(CPU_ARM9) || defined(CPU_ARM10) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)

void	armv4_tlb_flushID	__P((void));
void	armv4_tlb_flushI	__P((void));
void	armv4_tlb_flushD	__P((void));
void	armv4_tlb_flushD_SE	__P((u_int));

void	armv4_drain_writebuf	__P((void));
#endif

#if defined(CPU_IXP12X0)
void	ixp12x0_drain_readbuf	__P((void));
void	ixp12x0_context_switch	__P((void));
void	ixp12x0_setup		__P((char *));
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
void	xscale_cpwait		__P((void));

void	xscale_cpu_sleep	__P((int));

u_int	xscale_control		__P((u_int, u_int));

void	xscale_setttb		__P((u_int));

void	xscale_tlb_flushID_SE	__P((u_int));

void	xscale_cache_flushID	__P((void));
void	xscale_cache_flushI	__P((void));
void	xscale_cache_flushD	__P((void));
void	xscale_cache_flushD_SE	__P((u_int));

void	xscale_cache_cleanID	__P((void));
void	xscale_cache_cleanD	__P((void));
void	xscale_cache_cleanD_E	__P((u_int));

void	xscale_cache_clean_minidata __P((void));

void	xscale_cache_purgeID	__P((void));
void	xscale_cache_purgeID_E	__P((u_int));
void	xscale_cache_purgeD	__P((void));
void	xscale_cache_purgeD_E	__P((u_int));

void	xscale_cache_syncI	__P((void));
void	xscale_cache_cleanID_rng __P((vaddr_t, vsize_t));
void	xscale_cache_cleanD_rng	__P((vaddr_t, vsize_t));
void	xscale_cache_purgeID_rng __P((vaddr_t, vsize_t));
void	xscale_cache_purgeD_rng	__P((vaddr_t, vsize_t));
void	xscale_cache_syncI_rng	__P((vaddr_t, vsize_t));
void	xscale_cache_flushD_rng	__P((vaddr_t, vsize_t));

void	xscale_context_switch	__P((void));

void	xscale_setup		__P((char *));
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#define	tlb_flush	cpu_tlb_flushID
#define	setttb		cpu_setttb
#define	drain_writebuf	cpu_drain_writebuf

/*
 * Macros for manipulating CPU interrupts.
 * A usage sketch follows the definitions below.
 */
#ifdef __PROG32
static __inline u_int32_t __set_cpsr_c(u_int bic, u_int eor) __attribute__((__unused__));

static __inline u_int32_t
__set_cpsr_c(u_int bic, u_int eor)
{
	u_int32_t	tmp, ret;

	__asm volatile(
		"mrs     %0, cpsr\n"	/* Get the CPSR */
		"bic     %1, %0, %2\n"	/* Clear bits */
		"eor     %1, %1, %3\n"	/* XOR bits */
		"msr     cpsr_c, %1\n"	/* Set the control field of CPSR */
	: "=&r" (ret), "=&r" (tmp)
	: "r" (bic), "r" (eor) : "memory");

	return ret;
}

#define	disable_interrupts(mask)					\
	(__set_cpsr_c((mask) & (I32_bit | F32_bit),			\
		      (mask) & (I32_bit | F32_bit)))

#define	enable_interrupts(mask)						\
	(__set_cpsr_c((mask) & (I32_bit | F32_bit), 0))

#define	restore_interrupts(old_cpsr)					\
	(__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
#else /* ! __PROG32 */
#define	disable_interrupts(mask)					\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE),		\
		 (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))

#define	enable_interrupts(mask)						\
	(set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))

#define	restore_interrupts(old_r15)					\
	(set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE),			\
		 (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
#endif /* __PROG32 */
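
/*
 * Illustrative usage sketch (an assumption added for clarity, not part of
 * the original interface; the function name is hypothetical and the block
 * is never compiled): disable_interrupts() returns the previous CPSR
 * (or r15 on 26-bit systems), and that value is handed back to
 * restore_interrupts() so that nested critical sections compose.
 */
#if 0	/* example only */
static __inline void
example_critical_section(void)
{
	u_int savedints;

	savedints = disable_interrupts(I32_bit);	/* block IRQs, leave FIQs alone */
	/* ... touch data shared with interrupt handlers ... */
	restore_interrupts(savedints);			/* restore the previous state */
}
#endif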

#ifdef __PROG32
/* Functions to manipulate the CPSR. */
u_int	SetCPSR(u_int, u_int);
u_int	GetCPSR(void);
#else
/* Functions to manipulate the processor control bits in r15. */
u_int	set_r15(u_int, u_int);
u_int	get_r15(void);
#endif /* __PROG32 */

/*
 * Functions to manipulate cpu r13
 * (in arm/arm32/setstack.S)
 */

void	set_stackptr	__P((u_int, u_int));
u_int	get_stackptr	__P((u_int));

/*
 * Miscellany
 */

int	get_pc_str_offset	__P((void));

/*
 * CPU functions from locore.S
 */

void	cpu_reset	__P((void)) __attribute__((__noreturn__));

/*
 * Cache info variables.
 */

/* PRIMARY CACHE VARIABLES */
extern int	arm_picache_size;
extern int	arm_picache_line_size;
extern int	arm_picache_ways;

extern int	arm_pdcache_size;	/* and unified */
extern int	arm_pdcache_line_size;
extern int	arm_pdcache_ways;

extern int	arm_pcache_type;
extern int	arm_pcache_unified;

extern int	arm_dcache_align;
extern int	arm_dcache_align_mask;

#endif	/* _KERNEL */
#endif	/* _ARM32_CPUFUNC_H_ */

/* End of cpufunc.h */