/* cache_r4k.h,v 1.11.96.3 2012/01/19 08:28:48 matt Exp */
2 1.2 thorpej
3 1.2 thorpej /*
4 1.2 thorpej * Copyright 2001 Wasabi Systems, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 1.2 thorpej *
9 1.2 thorpej * Redistribution and use in source and binary forms, with or without
10 1.2 thorpej * modification, are permitted provided that the following conditions
11 1.2 thorpej * are met:
12 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
13 1.2 thorpej * notice, this list of conditions and the following disclaimer.
14 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
15 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
16 1.2 thorpej * documentation and/or other materials provided with the distribution.
17 1.2 thorpej * 3. All advertising materials mentioning features or use of this software
18 1.2 thorpej * must display the following acknowledgement:
19 1.2 thorpej * This product includes software developed for the NetBSD Project by
20 1.2 thorpej * Wasabi Systems, Inc.
21 1.2 thorpej * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 1.2 thorpej * or promote products derived from this software without specific prior
23 1.2 thorpej * written permission.
24 1.2 thorpej *
25 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 1.2 thorpej * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
36 1.2 thorpej */
37 1.2 thorpej
38 1.2 thorpej /*
39 1.2 thorpej * Cache definitions/operations for R4000-style caches.
40 1.2 thorpej */
41 1.2 thorpej
/*
 * Cache selector for the R4000 CACHE instruction (low two bits of the
 * 5-bit op field; the operation codes below are shifted left past them).
 */
#define	CACHE_R4K_I			0	/* primary instruction cache */
#define	CACHE_R4K_D			1	/* primary data cache */
#define	CACHE_R4K_SI			2	/* secondary instruction cache */
#define	CACHE_R4K_SD			3	/* secondary data cache */

/*
 * CACHE instruction operation codes; OR one of these with a cache
 * selector above to form the complete op field.  The trailing comment
 * on each line lists which caches accept the operation.
 */
#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */
57 1.2 thorpej
58 1.9 simonb #if !defined(_LOCORE)
59 1.2 thorpej
60 1.14 matt #if 1
/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single cache line
 *	addressed by "va".  "op" must be a compile-time constant because
 *	it is encoded as an immediate into the CACHE instruction (the
 *	"n" constraint); the __CTASSERT enforces that at build time.
 */
static inline void
cache_op_r4k_line(register_t va, u_int op)
{
	__CTASSERT(__builtin_constant_p(op));
	__asm volatile(
	    ".set push"			"\n\t"
	    ".set noreorder"		"\n\t"
	    "cache %[op], 0(%[va])"	"\n\t"
	    ".set pop"
	    :
	    : [op] "n" (op), [va] "r" (va)
	    : "memory");
}
79 1.12 matt
80 1.12 matt /*
81 1.12 matt * cache_r4k_op_8lines_NN:
82 1.12 matt *
83 1.12 matt * Perform the specified cache operation on 8 n-byte cache lines.
84 1.12 matt */
85 1.12 matt static inline void
86 1.12 matt cache_r4k_op_8lines_NN(size_t n, register_t va, u_int op)
87 1.12 matt {
88 1.12 matt __asm volatile(
89 1.12 matt ".set push" "\n\t"
90 1.12 matt ".set noreorder" "\n\t"
91 1.12 matt "cache %[op], (0*%[n])(%[va])" "\n\t"
92 1.12 matt "cache %[op], (1*%[n])(%[va])" "\n\t"
93 1.12 matt "cache %[op], (2*%[n])(%[va])" "\n\t"
94 1.12 matt "cache %[op], (3*%[n])(%[va])" "\n\t"
95 1.12 matt "cache %[op], (4*%[n])(%[va])" "\n\t"
96 1.12 matt "cache %[op], (5*%[n])(%[va])" "\n\t"
97 1.12 matt "cache %[op], (6*%[n])(%[va])" "\n\t"
98 1.12 matt "cache %[op], (7*%[n])(%[va])" "\n\t"
99 1.12 matt ".set pop"
100 1.12 matt :
101 1.12 matt : [va] "r" (va), [op] "i" (op), [n] "n" (n)
102 1.12 matt : "memory");
103 1.12 matt }
104 1.8 simonb
/*
 * cache_r4k_op_8lines_16:
 *	Perform the specified cache operation on 8 16-byte cache lines.
 * cache_r4k_op_8lines_32:
 *	Perform the specified cache operation on 8 32-byte cache lines.
 * cache_r4k_op_8lines_64:
 *	Perform the specified cache operation on 8 64-byte cache lines.
 * cache_r4k_op_8lines_128:
 *	Perform the specified cache operation on 8 128-byte cache lines.
 */
#define	cache_r4k_op_8lines_16(va, op)					\
	    cache_r4k_op_8lines_NN(16, (va), (op))
#define	cache_r4k_op_8lines_32(va, op)					\
	    cache_r4k_op_8lines_NN(32, (va), (op))
#define	cache_r4k_op_8lines_64(va, op)					\
	    cache_r4k_op_8lines_NN(64, (va), (op))
#define	cache_r4k_op_8lines_128(va, op)					\
	    cache_r4k_op_8lines_NN(128, (va), (op))
119 1.12 matt
120 1.12 matt /*
121 1.12 matt * cache_r4k_op_32lines_NN:
122 1.12 matt *
123 1.12 matt * Perform the specified cache operation on 32 n-byte cache lines.
124 1.12 matt */
125 1.12 matt static inline void
126 1.12 matt cache_r4k_op_32lines_NN(size_t n, register_t va, u_int op)
127 1.12 matt {
128 1.12 matt __CTASSERT(__builtin_constant_p(n));
129 1.12 matt __CTASSERT(__builtin_constant_p(op));
130 1.12 matt __asm volatile(
131 1.12 matt ".set push" "\n\t"
132 1.12 matt ".set noreorder" "\n\t"
133 1.12 matt "cache %[op], (0*%[n])(%[va])" "\n\t"
134 1.12 matt "cache %[op], (1*%[n])(%[va])" "\n\t"
135 1.12 matt "cache %[op], (2*%[n])(%[va])" "\n\t"
136 1.12 matt "cache %[op], (3*%[n])(%[va])" "\n\t"
137 1.12 matt "cache %[op], (4*%[n])(%[va])" "\n\t"
138 1.12 matt "cache %[op], (5*%[n])(%[va])" "\n\t"
139 1.12 matt "cache %[op], (6*%[n])(%[va])" "\n\t"
140 1.12 matt "cache %[op], (7*%[n])(%[va])" "\n\t"
141 1.12 matt "cache %[op], (8*%[n])(%[va])" "\n\t"
142 1.12 matt "cache %[op], (9*%[n])(%[va])" "\n\t"
143 1.12 matt "cache %[op], (10*%[n])(%[va])" "\n\t"
144 1.12 matt "cache %[op], (11*%[n])(%[va])" "\n\t"
145 1.12 matt "cache %[op], (12*%[n])(%[va])" "\n\t"
146 1.12 matt "cache %[op], (13*%[n])(%[va])" "\n\t"
147 1.12 matt "cache %[op], (14*%[n])(%[va])" "\n\t"
148 1.12 matt "cache %[op], (15*%[n])(%[va])" "\n\t"
149 1.12 matt "cache %[op], (16*%[n])(%[va])" "\n\t"
150 1.12 matt "cache %[op], (17*%[n])(%[va])" "\n\t"
151 1.12 matt "cache %[op], (18*%[n])(%[va])" "\n\t"
152 1.12 matt "cache %[op], (19*%[n])(%[va])" "\n\t"
153 1.12 matt "cache %[op], (20*%[n])(%[va])" "\n\t"
154 1.12 matt "cache %[op], (21*%[n])(%[va])" "\n\t"
155 1.12 matt "cache %[op], (22*%[n])(%[va])" "\n\t"
156 1.12 matt "cache %[op], (23*%[n])(%[va])" "\n\t"
157 1.12 matt "cache %[op], (24*%[n])(%[va])" "\n\t"
158 1.12 matt "cache %[op], (25*%[n])(%[va])" "\n\t"
159 1.12 matt "cache %[op], (26*%[n])(%[va])" "\n\t"
160 1.12 matt "cache %[op], (27*%[n])(%[va])" "\n\t"
161 1.12 matt "cache %[op], (28*%[n])(%[va])" "\n\t"
162 1.12 matt "cache %[op], (29*%[n])(%[va])" "\n\t"
163 1.12 matt "cache %[op], (30*%[n])(%[va])" "\n\t"
164 1.12 matt "cache %[op], (31*%[n])(%[va])" "\n\t"
165 1.12 matt ".set pop"
166 1.12 matt :
167 1.12 matt : [n] "n" ((uint8_t)n), [va] "r" (va), [op] "i" ((uint8_t)op)
168 1.12 matt : "memory");
169 1.12 matt }
170 1.2 thorpej
/*
 * cache_r4k_op_32lines_16:
 *	Perform the specified cache operation on 32 16-byte cache lines.
 * cache_r4k_op_32lines_32:
 *	Perform the specified cache operation on 32 32-byte cache lines.
 * cache_r4k_op_32lines_64:
 *	Perform the specified cache operation on 32 64-byte cache lines.
 * cache_r4k_op_32lines_128:
 *	Perform the specified cache operation on 32 128-byte cache lines.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
	    cache_r4k_op_32lines_NN(16, (va), (op))
#define	cache_r4k_op_32lines_32(va, op)					\
	    cache_r4k_op_32lines_NN(32, (va), (op))
#define	cache_r4k_op_32lines_64(va, op)					\
	    cache_r4k_op_32lines_NN(64, (va), (op))
#define	cache_r4k_op_32lines_128(va, op)				\
	    cache_r4k_op_32lines_NN(128, (va), (op))
184 1.2 thorpej
185 1.2 thorpej /*
186 1.12 matt * cache_r4k_op_16lines_16_2way:
187 1.12 matt * Perform the specified cache operation on 16 n-byte cache lines, 2-ways.
188 1.2 thorpej */
189 1.12 matt static inline void
190 1.12 matt cache_r4k_op_16lines_NN_2way(size_t n, register_t va1, register_t va2, u_int op)
191 1.12 matt {
192 1.12 matt __asm volatile(
193 1.12 matt ".set push" "\n\t"
194 1.12 matt ".set noreorder" "\n\t"
195 1.12 matt "cache %[op], (0*%[n])(%[va1])" "\n\t"
196 1.12 matt "cache %[op], (0*%[n])(%[va2])" "\n\t"
197 1.12 matt "cache %[op], (1*%[n])(%[va1])" "\n\t"
198 1.12 matt "cache %[op], (1*%[n])(%[va2])" "\n\t"
199 1.12 matt "cache %[op], (2*%[n])(%[va1])" "\n\t"
200 1.12 matt "cache %[op], (2*%[n])(%[va2])" "\n\t"
201 1.12 matt "cache %[op], (3*%[n])(%[va1])" "\n\t"
202 1.12 matt "cache %[op], (3*%[n])(%[va2])" "\n\t"
203 1.12 matt "cache %[op], (4*%[n])(%[va1])" "\n\t"
204 1.12 matt "cache %[op], (4*%[n])(%[va2])" "\n\t"
205 1.12 matt "cache %[op], (5*%[n])(%[va1])" "\n\t"
206 1.12 matt "cache %[op], (5*%[n])(%[va2])" "\n\t"
207 1.12 matt "cache %[op], (6*%[n])(%[va1])" "\n\t"
208 1.12 matt "cache %[op], (6*%[n])(%[va2])" "\n\t"
209 1.12 matt "cache %[op], (7*%[n])(%[va1])" "\n\t"
210 1.12 matt "cache %[op], (7*%[n])(%[va2])" "\n\t"
211 1.12 matt "cache %[op], (8*%[n])(%[va1])" "\n\t"
212 1.12 matt "cache %[op], (8*%[n])(%[va2])" "\n\t"
213 1.12 matt "cache %[op], (9*%[n])(%[va1])" "\n\t"
214 1.12 matt "cache %[op], (9*%[n])(%[va2])" "\n\t"
215 1.12 matt "cache %[op], (10*%[n])(%[va1])" "\n\t"
216 1.12 matt "cache %[op], (10*%[n])(%[va2])" "\n\t"
217 1.12 matt "cache %[op], (11*%[n])(%[va1])" "\n\t"
218 1.12 matt "cache %[op], (11*%[n])(%[va2])" "\n\t"
219 1.12 matt "cache %[op], (12*%[n])(%[va1])" "\n\t"
220 1.12 matt "cache %[op], (12*%[n])(%[va2])" "\n\t"
221 1.12 matt "cache %[op], (13*%[n])(%[va1])" "\n\t"
222 1.12 matt "cache %[op], (13*%[n])(%[va2])" "\n\t"
223 1.12 matt "cache %[op], (14*%[n])(%[va1])" "\n\t"
224 1.12 matt "cache %[op], (14*%[n])(%[va2])" "\n\t"
225 1.12 matt "cache %[op], (15*%[n])(%[va1])" "\n\t"
226 1.12 matt "cache %[op], (15*%[n])(%[va2])" "\n\t"
227 1.12 matt ".set pop"
228 1.12 matt :
229 1.12 matt : [va1] "r" (va1), [va2] "r" (va2), [op] "i" (op), [n] "n" (n)
230 1.12 matt : "memory");
231 1.12 matt }
232 1.3 thorpej
/*
 * cache_r4k_op_16lines_16_2way:
 *	Perform the specified cache operation on 16 16-byte cache lines, 2-ways.
 * cache_r4k_op_16lines_32_2way:
 *	Perform the specified cache operation on 16 32-byte cache lines, 2-ways.
 * cache_r4k_op_16lines_64_2way:
 *	Perform the specified cache operation on 16 64-byte cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
	    cache_r4k_op_16lines_NN_2way(16, (va1), (va2), (op))
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
	    cache_r4k_op_16lines_NN_2way(32, (va1), (va2), (op))
#define	cache_r4k_op_16lines_64_2way(va1, va2, op)			\
	    cache_r4k_op_16lines_NN_2way(64, (va1), (va2), (op))
245 1.12 matt
246 1.12 matt /*
247 1.12 matt * cache_r4k_op_8lines_NN_4way:
248 1.12 matt * Perform the specified cache operation on 8 n-byte cache lines, 4-ways.
249 1.12 matt */
250 1.12 matt static inline void
251 1.12 matt cache_r4k_op_8lines_NN_4way(size_t n, register_t va1, register_t va2,
252 1.12 matt register_t va3, register_t va4, u_int op)
253 1.12 matt {
254 1.12 matt __asm volatile(
255 1.12 matt ".set push" "\n\t"
256 1.12 matt ".set noreorder" "\n\t"
257 1.12 matt "cache %[op], (0*%[n])(%[va1])" "\n\t"
258 1.12 matt "cache %[op], (0*%[n])(%[va2])" "\n\t"
259 1.12 matt "cache %[op], (0*%[n])(%[va3])" "\n\t"
260 1.12 matt "cache %[op], (0*%[n])(%[va4])" "\n\t"
261 1.12 matt "cache %[op], (1*%[n])(%[va1])" "\n\t"
262 1.12 matt "cache %[op], (1*%[n])(%[va2])" "\n\t"
263 1.12 matt "cache %[op], (1*%[n])(%[va3])" "\n\t"
264 1.12 matt "cache %[op], (1*%[n])(%[va4])" "\n\t"
265 1.12 matt "cache %[op], (2*%[n])(%[va1])" "\n\t"
266 1.12 matt "cache %[op], (2*%[n])(%[va2])" "\n\t"
267 1.12 matt "cache %[op], (2*%[n])(%[va3])" "\n\t"
268 1.12 matt "cache %[op], (2*%[n])(%[va4])" "\n\t"
269 1.12 matt "cache %[op], (3*%[n])(%[va1])" "\n\t"
270 1.12 matt "cache %[op], (3*%[n])(%[va2])" "\n\t"
271 1.12 matt "cache %[op], (3*%[n])(%[va3])" "\n\t"
272 1.12 matt "cache %[op], (3*%[n])(%[va4])" "\n\t"
273 1.12 matt "cache %[op], (4*%[n])(%[va1])" "\n\t"
274 1.12 matt "cache %[op], (4*%[n])(%[va2])" "\n\t"
275 1.12 matt "cache %[op], (4*%[n])(%[va3])" "\n\t"
276 1.12 matt "cache %[op], (4*%[n])(%[va4])" "\n\t"
277 1.12 matt "cache %[op], (5*%[n])(%[va1])" "\n\t"
278 1.12 matt "cache %[op], (5*%[n])(%[va2])" "\n\t"
279 1.12 matt "cache %[op], (5*%[n])(%[va3])" "\n\t"
280 1.12 matt "cache %[op], (5*%[n])(%[va4])" "\n\t"
281 1.12 matt "cache %[op], (6*%[n])(%[va1])" "\n\t"
282 1.12 matt "cache %[op], (6*%[n])(%[va2])" "\n\t"
283 1.12 matt "cache %[op], (6*%[n])(%[va3])" "\n\t"
284 1.12 matt "cache %[op], (6*%[n])(%[va4])" "\n\t"
285 1.12 matt "cache %[op], (7*%[n])(%[va1])" "\n\t"
286 1.12 matt "cache %[op], (7*%[n])(%[va2])" "\n\t"
287 1.12 matt "cache %[op], (7*%[n])(%[va3])" "\n\t"
288 1.12 matt "cache %[op], (7*%[n])(%[va4])" "\n\t"
289 1.12 matt ".set pop"
290 1.12 matt :
291 1.12 matt : [va1] "r" (va1), [va2] "r" (va2),
292 1.12 matt [va3] "r" (va3), [va4] "r" (va4),
293 1.12 matt [op] "i" (op), [n] "n" (n)
294 1.12 matt : "memory");
295 1.12 matt }
/*
 * cache_r4k_op_8lines_16_4way:
 *	Perform the specified cache operation on 8 16-byte cache lines, 4-ways.
 * cache_r4k_op_8lines_32_4way:
 *	Perform the specified cache operation on 8 32-byte cache lines, 4-ways.
 * cache_r4k_op_8lines_64_4way:
 *	Perform the specified cache operation on 8 64-byte cache lines, 4-ways.
 * cache_r4k_op_8lines_128_4way:
 *	Perform the specified cache operation on 8 128-byte cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
	    cache_r4k_op_8lines_NN_4way(16, (va1), (va2), (va3), (va4), (op))
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
	    cache_r4k_op_8lines_NN_4way(32, (va1), (va2), (va3), (va4), (op))
#define	cache_r4k_op_8lines_64_4way(va1, va2, va3, va4, op)		\
	    cache_r4k_op_8lines_NN_4way(64, (va1), (va2), (va3), (va4), (op))
#define	cache_r4k_op_8lines_128_4way(va1, va2, va3, va4, op)		\
	    cache_r4k_op_8lines_NN_4way(128, (va1), (va2), (va3), (va4), (op))
310 1.14 matt #endif
311 1.7 simonb
312 1.14 matt /* cache_r4k.c */
313 1.2 thorpej
314 1.14 matt void r4k_icache_sync_all_generic(void);
315 1.14 matt void r4k_icache_sync_range_generic(register_t, vsize_t);
316 1.14 matt void r4k_icache_sync_range_index_generic(vaddr_t, vsize_t);
317 1.14 matt void r4k_pdcache_wbinv_all_generic(void);
318 1.2 thorpej void r4k_sdcache_wbinv_all_generic(void);
319 1.12 matt
320 1.12 matt /* cache_r4k_pcache16.S */
321 1.12 matt
322 1.12 matt void cache_r4k_icache_index_inv_16(vaddr_t, vsize_t);
323 1.12 matt void cache_r4k_icache_hit_inv_16(register_t, vsize_t);
324 1.12 matt void cache_r4k_pdcache_index_wb_inv_16(vaddr_t, vsize_t);
325 1.12 matt void cache_r4k_pdcache_hit_inv_16(register_t, vsize_t);
326 1.12 matt void cache_r4k_pdcache_hit_wb_inv_16(register_t, vsize_t);
327 1.12 matt void cache_r4k_pdcache_hit_wb_16(register_t, vsize_t);
328 1.12 matt
329 1.14 matt /* cache_r4k_scache16.S */
330 1.14 matt
331 1.14 matt void cache_r4k_sdcache_index_wb_inv_16(vaddr_t, vsize_t);
332 1.14 matt void cache_r4k_sdcache_hit_inv_16(register_t, vsize_t);
333 1.14 matt void cache_r4k_sdcache_hit_wb_inv_16(register_t, vsize_t);
334 1.14 matt void cache_r4k_sdcache_hit_wb_16(register_t, vsize_t);
335 1.14 matt
336 1.12 matt /* cache_r4k_pcache32.S */
337 1.12 matt
338 1.12 matt void cache_r4k_icache_index_inv_32(vaddr_t, vsize_t);
339 1.12 matt void cache_r4k_icache_hit_inv_32(register_t, vsize_t);
340 1.12 matt void cache_r4k_pdcache_index_wb_inv_32(vaddr_t, vsize_t);
341 1.12 matt void cache_r4k_pdcache_hit_inv_32(register_t, vsize_t);
342 1.12 matt void cache_r4k_pdcache_hit_wb_inv_32(register_t, vsize_t);
343 1.12 matt void cache_r4k_pdcache_hit_wb_32(register_t, vsize_t);
344 1.12 matt
345 1.14 matt /* cache_r4k_scache32.S */
346 1.14 matt
347 1.14 matt void cache_r4k_sdcache_index_wb_inv_32(vaddr_t, vsize_t);
348 1.14 matt void cache_r4k_sdcache_hit_inv_32(register_t, vsize_t);
349 1.14 matt void cache_r4k_sdcache_hit_wb_inv_32(register_t, vsize_t);
350 1.14 matt void cache_r4k_sdcache_hit_wb_32(register_t, vsize_t);
351 1.14 matt
352 1.12 matt /* cache_r4k_pcache64.S */
353 1.12 matt
354 1.12 matt void cache_r4k_icache_index_inv_64(vaddr_t, vsize_t);
355 1.12 matt void cache_r4k_icache_hit_inv_64(register_t, vsize_t);
356 1.12 matt void cache_r4k_pdcache_index_wb_inv_64(vaddr_t, vsize_t);
357 1.12 matt void cache_r4k_pdcache_hit_inv_64(register_t, vsize_t);
358 1.12 matt void cache_r4k_pdcache_hit_wb_inv_64(register_t, vsize_t);
359 1.12 matt void cache_r4k_pdcache_hit_wb_64(register_t, vsize_t);
360 1.12 matt
361 1.14 matt /* cache_r4k_scache64.S */
362 1.14 matt
363 1.14 matt void cache_r4k_sdcache_index_wb_inv_64(vaddr_t, vsize_t);
364 1.14 matt void cache_r4k_sdcache_hit_inv_64(register_t, vsize_t);
365 1.14 matt void cache_r4k_sdcache_hit_wb_inv_64(register_t, vsize_t);
366 1.14 matt void cache_r4k_sdcache_hit_wb_64(register_t, vsize_t);
367 1.14 matt
368 1.12 matt /* cache_r4k_pcache128.S */
369 1.12 matt
370 1.12 matt void cache_r4k_icache_index_inv_128(vaddr_t, vsize_t);
371 1.12 matt void cache_r4k_icache_hit_inv_128(register_t, vsize_t);
372 1.12 matt void cache_r4k_pdcache_index_wb_inv_128(vaddr_t, vsize_t);
373 1.12 matt void cache_r4k_pdcache_hit_inv_128(register_t, vsize_t);
374 1.12 matt void cache_r4k_pdcache_hit_wb_inv_128(register_t, vsize_t);
375 1.12 matt void cache_r4k_pdcache_hit_wb_128(register_t, vsize_t);
376 1.14 matt
377 1.14 matt /* cache_r4k_scache128.S */
378 1.14 matt
379 1.12 matt void cache_r4k_sdcache_index_wb_inv_128(vaddr_t, vsize_t);
380 1.12 matt void cache_r4k_sdcache_hit_inv_128(register_t, vsize_t);
381 1.12 matt void cache_r4k_sdcache_hit_wb_inv_128(register_t, vsize_t);
382 1.12 matt void cache_r4k_sdcache_hit_wb_128(register_t, vsize_t);
383 1.13 skrll
384 1.9 simonb #endif /* !_LOCORE */
385