/*	$NetBSD: cache_r4k.h,v 1.2.2.4 2002/04/01 07:40:57 nathanw Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */

#define	CACHE_R4K_I			0
#define	CACHE_R4K_D			1
#define	CACHE_R4K_SI			2
#define	CACHE_R4K_SD			3

#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */
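
/*
 * In the R4000 CACHE instruction, bits 1..0 of the op field select the
 * target cache and bits 4..2 select the operation, so a complete operand
 * is formed by OR'ing one CACHE_R4K_* constant with one CACHEOP_R4K_*
 * constant.  Illustrative sketch (not part of this header):
 *
 *	cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *	cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
 */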

#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single line.
 */
#define	cache_op_r4k_line(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0(%0)				\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
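
/*
 * Illustrative sketch (not part of this header): a hit-type operation
 * over an arbitrary virtual address range can be built on top of
 * cache_op_r4k_line() by truncating the start address to a line boundary
 * and stepping one line at a time, e.g. for a 16-byte-line primary
 * data cache (round_line/trunc_line are hypothetical helpers):
 *
 *	void
 *	example_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
 *	{
 *		vaddr_t eva = round_line(va + size);
 *
 *		for (va = trunc_line(va); va < eva; va += 16)
 *			cache_op_r4k_line(va,
 *			    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *	}
 */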

/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x010(%0);	\n\t"	\
		"cache %1, 0x020(%0); cache %1, 0x030(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x050(%0);	\n\t"	\
		"cache %1, 0x060(%0); cache %1, 0x070(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x090(%0);	\n\t"	\
		"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\n\t"	\
		"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x110(%0);	\n\t"	\
		"cache %1, 0x120(%0); cache %1, 0x130(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x150(%0);	\n\t"	\
		"cache %1, 0x160(%0); cache %1, 0x170(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x190(%0);	\n\t"	\
		"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\n\t"	\
		"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)
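
/*
 * Illustrative sketch (not part of this header): each unrolled variant
 * covers 32 lines (512 bytes at 16 bytes per line) per invocation, so an
 * index-type "sync all" over a 16-byte-line primary icache might step
 * through a KSEG0 window of the cache's size in 512-byte chunks.
 * Assuming mips_picache_size (the probed primary icache size exported
 * by the MI MIPS cache code) and MIPS_PHYS_TO_KSEG0():
 *
 *	void
 *	example_icache_sync_all_16(void)
 *	{
 *		vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
 *		vaddr_t eva = va + mips_picache_size;
 *
 *		while (va < eva) {
 *			cache_r4k_op_32lines_16(va,
 *			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
 *			va += (32 * 16);
 *		}
 *	}
 */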

/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x020(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x060(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x120(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x160(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\n\t"	\
		"cache %1, 0x200(%0); cache %1, 0x220(%0);	\n\t"	\
		"cache %1, 0x240(%0); cache %1, 0x260(%0);	\n\t"	\
		"cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\n\t"	\
		"cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\n\t"	\
		"cache %1, 0x300(%0); cache %1, 0x320(%0);	\n\t"	\
		"cache %1, 0x340(%0); cache %1, 0x360(%0);	\n\t"	\
		"cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\n\t"	\
		"cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_128(va, op)				\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x0000(%0); cache %1, 0x0080(%0);	\n\t"	\
		"cache %1, 0x0100(%0); cache %1, 0x0180(%0);	\n\t"	\
		"cache %1, 0x0200(%0); cache %1, 0x0280(%0);	\n\t"	\
		"cache %1, 0x0300(%0); cache %1, 0x0380(%0);	\n\t"	\
		"cache %1, 0x0400(%0); cache %1, 0x0480(%0);	\n\t"	\
		"cache %1, 0x0500(%0); cache %1, 0x0580(%0);	\n\t"	\
		"cache %1, 0x0600(%0); cache %1, 0x0680(%0);	\n\t"	\
		"cache %1, 0x0700(%0); cache %1, 0x0780(%0);	\n\t"	\
		"cache %1, 0x0800(%0); cache %1, 0x0880(%0);	\n\t"	\
		"cache %1, 0x0900(%0); cache %1, 0x0980(%0);	\n\t"	\
		"cache %1, 0x0a00(%0); cache %1, 0x0a80(%0);	\n\t"	\
		"cache %1, 0x0b00(%0); cache %1, 0x0b80(%0);	\n\t"	\
		"cache %1, 0x0c00(%0); cache %1, 0x0c80(%0);	\n\t"	\
		"cache %1, 0x0d00(%0); cache %1, 0x0d80(%0);	\n\t"	\
		"cache %1, 0x0e00(%0); cache %1, 0x0e80(%0);	\n\t"	\
		"cache %1, 0x0f00(%0); cache %1, 0x0f80(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 *	cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x010(%0); cache %2, 0x010(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x030(%0); cache %2, 0x030(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x050(%0); cache %2, 0x050(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x070(%0); cache %2, 0x070(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x090(%0); cache %2, 0x090(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0b0(%0); cache %2, 0x0b0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0d0(%0); cache %2, 0x0d0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x0f0(%0); cache %2, 0x0f0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)
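
/*
 * Illustrative sketch (not part of this header): in the n-way variants,
 * each vaN argument names the same line index in a different way of a
 * set-associative cache.  For index-type operations the way is typically
 * selected by the address bits just above the per-way index, so with a
 * 2-way cache way 1 is reached by adding the way size (total size divided
 * by the number of ways) to the way 0 address.  Assuming a hypothetical
 * pdcache_way_size variable holding that per-way size:
 *
 *	vaddr_t way0 = MIPS_PHYS_TO_KSEG0(0);
 *	vaddr_t way1 = way0 + pdcache_way_size;
 *
 *	cache_r4k_op_16lines_16_2way(way0, way1,
 *	    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 */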

/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 *	cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x100(%0); cache %2, 0x100(%1);	\n\t"	\
		"cache %2, 0x120(%0); cache %2, 0x120(%1);	\n\t"	\
		"cache %2, 0x140(%0); cache %2, 0x140(%1);	\n\t"	\
		"cache %2, 0x160(%0); cache %2, 0x160(%1);	\n\t"	\
		"cache %2, 0x180(%0); cache %2, 0x180(%1);	\n\t"	\
		"cache %2, 0x1a0(%0); cache %2, 0x1a0(%1);	\n\t"	\
		"cache %2, 0x1c0(%0); cache %2, 0x1c0(%1);	\n\t"	\
		"cache %2, 0x1e0(%0); cache %2, 0x1e0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 *	cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x010(%0); cache %4, 0x010(%1);	\n\t"	\
		"cache %4, 0x010(%2); cache %4, 0x010(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x030(%0); cache %4, 0x030(%1);	\n\t"	\
		"cache %4, 0x030(%2); cache %4, 0x030(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x050(%0); cache %4, 0x050(%1);	\n\t"	\
		"cache %4, 0x050(%2); cache %4, 0x050(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x070(%0); cache %4, 0x070(%1);	\n\t"	\
		"cache %4, 0x070(%2); cache %4, 0x070(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 *	cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x080(%0); cache %4, 0x080(%1);	\n\t"	\
		"cache %4, 0x080(%2); cache %4, 0x080(%3);	\n\t"	\
		"cache %4, 0x0a0(%0); cache %4, 0x0a0(%1);	\n\t"	\
		"cache %4, 0x0a0(%2); cache %4, 0x0a0(%3);	\n\t"	\
		"cache %4, 0x0c0(%0); cache %4, 0x0c0(%1);	\n\t"	\
		"cache %4, 0x0c0(%2); cache %4, 0x0c0(%3);	\n\t"	\
		"cache %4, 0x0e0(%0); cache %4, 0x0e0(%1);	\n\t"	\
		"cache %4, 0x0e0(%2); cache %4, 0x0e0(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(vaddr_t, vsize_t);
void	r4k_icache_sync_range_index_16(vaddr_t, vsize_t);

void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(vaddr_t, vsize_t);
void	r4k_icache_sync_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_16(vaddr_t, vsize_t);
void	r4k_pdcache_wb_range_16(vaddr_t, vsize_t);

void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4k_pdcache_wb_range_32(vaddr_t, vsize_t);

void	r5k_icache_sync_all_32(void);
void	r5k_icache_sync_range_32(vaddr_t, vsize_t);
void	r5k_icache_sync_range_index_32(vaddr_t, vsize_t);

void	r5k_pdcache_wbinv_all_16(void);
void	r5k_pdcache_wbinv_all_32(void);
void	r4600v1_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	vr4131v1_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4600v1_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_inv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4600v1_pdcache_wb_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_wb_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_wb_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wb_range_32(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_32(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_32(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_128(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_128(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_128(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_generic(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_generic(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_generic(vaddr_t, vsize_t);
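
/*
 * Illustrative sketch (not part of this header): these routines are the
 * implementation-specific back ends behind the machine-independent cache
 * hooks; cache autoconfiguration picks the variant that matches the probed
 * line size.  Assuming the mips_cache_ops indirection table and the
 * mips_picache_line_size variable declared in <mips/cache.h>, selection
 * might look like:
 *
 *	switch (mips_picache_line_size) {
 *	case 16:
 *		mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_16;
 *		mips_cache_ops.mco_icache_sync_range =
 *		    r4k_icache_sync_range_16;
 *		break;
 *	case 32:
 *		mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_32;
 *		mips_cache_ops.mco_icache_sync_range =
 *		    r4k_icache_sync_range_32;
 *		break;
 *	}
 */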

#endif /* _KERNEL && !_LOCORE */