/* cache_r4k.h,v 1.11.96.3 2012/01/19 08:28:48 matt Exp */
2 1.2 thorpej
3 1.2 thorpej /*
4 1.2 thorpej * Copyright 2001 Wasabi Systems, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 1.2 thorpej *
9 1.2 thorpej * Redistribution and use in source and binary forms, with or without
10 1.2 thorpej * modification, are permitted provided that the following conditions
11 1.2 thorpej * are met:
12 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
13 1.2 thorpej * notice, this list of conditions and the following disclaimer.
14 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
15 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
16 1.2 thorpej * documentation and/or other materials provided with the distribution.
17 1.2 thorpej * 3. All advertising materials mentioning features or use of this software
18 1.2 thorpej * must display the following acknowledgement:
19 1.2 thorpej * This product includes software developed for the NetBSD Project by
20 1.2 thorpej * Wasabi Systems, Inc.
21 1.2 thorpej * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 1.2 thorpej * or promote products derived from this software without specific prior
23 1.2 thorpej * written permission.
24 1.2 thorpej *
25 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 1.2 thorpej * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
36 1.2 thorpej */
37 1.2 thorpej
38 1.2 thorpej /*
39 1.2 thorpej * Cache definitions/operations for R4000-style caches.
40 1.2 thorpej */
41 1.2 thorpej
/* Cache selector field (bits 0-1) of the CACHE instruction op. */
#define	CACHE_R4K_I			0	/* primary instruction */
#define	CACHE_R4K_D			1	/* primary data */
#define	CACHE_R4K_SI			2	/* secondary instruction */
#define	CACHE_R4K_SD			3	/* secondary data */

/* Operation field (bits 2-4) of the CACHE instruction op. */
#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */
57 1.2 thorpej
58 1.9 simonb #if !defined(_LOCORE)
59 1.2 thorpej
60 1.2 thorpej /*
61 1.2 thorpej * cache_r4k_op_line:
62 1.2 thorpej *
63 1.2 thorpej * Perform the specified cache operation on a single line.
64 1.2 thorpej */
65 1.12 matt static inline void
66 1.12 matt cache_op_r4k_line(register_t va, u_int op)
67 1.12 matt {
68 1.12 matt __CTASSERT(__builtin_constant_p(op));
69 1.12 matt __asm volatile(
70 1.12 matt ".set push" "\n\t"
71 1.12 matt ".set noreorder" "\n\t"
72 1.12 matt "cache %[op], 0(%[va])" "\n\t"
73 1.12 matt ".set pop"
74 1.12 matt :
75 1.12 matt : [op] "n" (op), [va] "r" (va)
76 1.12 matt : "memory");
77 1.12 matt }
78 1.12 matt
79 1.12 matt /*
80 1.12 matt * cache_r4k_op_8lines_NN:
81 1.12 matt *
82 1.12 matt * Perform the specified cache operation on 8 n-byte cache lines.
83 1.12 matt */
84 1.12 matt static inline void
85 1.12 matt cache_r4k_op_8lines_NN(size_t n, register_t va, u_int op)
86 1.12 matt {
87 1.12 matt __asm volatile(
88 1.12 matt ".set push" "\n\t"
89 1.12 matt ".set noreorder" "\n\t"
90 1.12 matt "cache %[op], (0*%[n])(%[va])" "\n\t"
91 1.12 matt "cache %[op], (1*%[n])(%[va])" "\n\t"
92 1.12 matt "cache %[op], (2*%[n])(%[va])" "\n\t"
93 1.12 matt "cache %[op], (3*%[n])(%[va])" "\n\t"
94 1.12 matt "cache %[op], (4*%[n])(%[va])" "\n\t"
95 1.12 matt "cache %[op], (5*%[n])(%[va])" "\n\t"
96 1.12 matt "cache %[op], (6*%[n])(%[va])" "\n\t"
97 1.12 matt "cache %[op], (7*%[n])(%[va])" "\n\t"
98 1.12 matt ".set pop"
99 1.12 matt :
100 1.12 matt : [va] "r" (va), [op] "i" (op), [n] "n" (n)
101 1.12 matt : "memory");
102 1.12 matt }
103 1.8 simonb
/*
 * cache_r4k_op_8lines_16:
 *	Perform the specified cache operation on 8 16-byte cache lines.
 * cache_r4k_op_8lines_32:
 *	Perform the specified cache operation on 8 32-byte cache lines.
 * cache_r4k_op_8lines_64:
 *	Perform the specified cache operation on 8 64-byte cache lines.
 * cache_r4k_op_8lines_128:
 *	Perform the specified cache operation on 8 128-byte cache lines.
 */
#define	cache_r4k_op_8lines_16(va, op)					\
	cache_r4k_op_8lines_NN(16, (va), (op))
#define	cache_r4k_op_8lines_32(va, op)					\
	cache_r4k_op_8lines_NN(32, (va), (op))
#define	cache_r4k_op_8lines_64(va, op)					\
	cache_r4k_op_8lines_NN(64, (va), (op))
#define	cache_r4k_op_8lines_128(va, op)					\
	cache_r4k_op_8lines_NN(128, (va), (op))
118 1.12 matt
119 1.12 matt /*
120 1.12 matt * cache_r4k_op_32lines_NN:
121 1.12 matt *
122 1.12 matt * Perform the specified cache operation on 32 n-byte cache lines.
123 1.12 matt */
124 1.12 matt static inline void
125 1.12 matt cache_r4k_op_32lines_NN(size_t n, register_t va, u_int op)
126 1.12 matt {
127 1.12 matt __CTASSERT(__builtin_constant_p(n));
128 1.12 matt __CTASSERT(__builtin_constant_p(op));
129 1.12 matt __asm volatile(
130 1.12 matt ".set push" "\n\t"
131 1.12 matt ".set noreorder" "\n\t"
132 1.12 matt "cache %[op], (0*%[n])(%[va])" "\n\t"
133 1.12 matt "cache %[op], (1*%[n])(%[va])" "\n\t"
134 1.12 matt "cache %[op], (2*%[n])(%[va])" "\n\t"
135 1.12 matt "cache %[op], (3*%[n])(%[va])" "\n\t"
136 1.12 matt "cache %[op], (4*%[n])(%[va])" "\n\t"
137 1.12 matt "cache %[op], (5*%[n])(%[va])" "\n\t"
138 1.12 matt "cache %[op], (6*%[n])(%[va])" "\n\t"
139 1.12 matt "cache %[op], (7*%[n])(%[va])" "\n\t"
140 1.12 matt "cache %[op], (8*%[n])(%[va])" "\n\t"
141 1.12 matt "cache %[op], (9*%[n])(%[va])" "\n\t"
142 1.12 matt "cache %[op], (10*%[n])(%[va])" "\n\t"
143 1.12 matt "cache %[op], (11*%[n])(%[va])" "\n\t"
144 1.12 matt "cache %[op], (12*%[n])(%[va])" "\n\t"
145 1.12 matt "cache %[op], (13*%[n])(%[va])" "\n\t"
146 1.12 matt "cache %[op], (14*%[n])(%[va])" "\n\t"
147 1.12 matt "cache %[op], (15*%[n])(%[va])" "\n\t"
148 1.12 matt "cache %[op], (16*%[n])(%[va])" "\n\t"
149 1.12 matt "cache %[op], (17*%[n])(%[va])" "\n\t"
150 1.12 matt "cache %[op], (18*%[n])(%[va])" "\n\t"
151 1.12 matt "cache %[op], (19*%[n])(%[va])" "\n\t"
152 1.12 matt "cache %[op], (20*%[n])(%[va])" "\n\t"
153 1.12 matt "cache %[op], (21*%[n])(%[va])" "\n\t"
154 1.12 matt "cache %[op], (22*%[n])(%[va])" "\n\t"
155 1.12 matt "cache %[op], (23*%[n])(%[va])" "\n\t"
156 1.12 matt "cache %[op], (24*%[n])(%[va])" "\n\t"
157 1.12 matt "cache %[op], (25*%[n])(%[va])" "\n\t"
158 1.12 matt "cache %[op], (26*%[n])(%[va])" "\n\t"
159 1.12 matt "cache %[op], (27*%[n])(%[va])" "\n\t"
160 1.12 matt "cache %[op], (28*%[n])(%[va])" "\n\t"
161 1.12 matt "cache %[op], (29*%[n])(%[va])" "\n\t"
162 1.12 matt "cache %[op], (30*%[n])(%[va])" "\n\t"
163 1.12 matt "cache %[op], (31*%[n])(%[va])" "\n\t"
164 1.12 matt ".set pop"
165 1.12 matt :
166 1.12 matt : [n] "n" ((uint8_t)n), [va] "r" (va), [op] "i" ((uint8_t)op)
167 1.12 matt : "memory");
168 1.12 matt }
169 1.2 thorpej
/*
 * cache_r4k_op_32lines_16:
 *	Perform the specified cache operation on 32 16-byte cache lines.
 * cache_r4k_op_32lines_32:
 *	Perform the specified cache operation on 32 32-byte cache lines.
 * cache_r4k_op_32lines_64:
 *	Perform the specified cache operation on 32 64-byte cache lines.
 * cache_r4k_op_32lines_128:
 *	Perform the specified cache operation on 32 128-byte cache lines.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
	cache_r4k_op_32lines_NN(16, (va), (op))
#define	cache_r4k_op_32lines_32(va, op)					\
	cache_r4k_op_32lines_NN(32, (va), (op))
#define	cache_r4k_op_32lines_64(va, op)					\
	cache_r4k_op_32lines_NN(64, (va), (op))
#define	cache_r4k_op_32lines_128(va, op)				\
	cache_r4k_op_32lines_NN(128, (va), (op))
183 1.2 thorpej
184 1.2 thorpej /*
185 1.12 matt * cache_r4k_op_16lines_16_2way:
186 1.12 matt * Perform the specified cache operation on 16 n-byte cache lines, 2-ways.
187 1.2 thorpej */
188 1.12 matt static inline void
189 1.12 matt cache_r4k_op_16lines_NN_2way(size_t n, register_t va1, register_t va2, u_int op)
190 1.12 matt {
191 1.12 matt __asm volatile(
192 1.12 matt ".set push" "\n\t"
193 1.12 matt ".set noreorder" "\n\t"
194 1.12 matt "cache %[op], (0*%[n])(%[va1])" "\n\t"
195 1.12 matt "cache %[op], (0*%[n])(%[va2])" "\n\t"
196 1.12 matt "cache %[op], (1*%[n])(%[va1])" "\n\t"
197 1.12 matt "cache %[op], (1*%[n])(%[va2])" "\n\t"
198 1.12 matt "cache %[op], (2*%[n])(%[va1])" "\n\t"
199 1.12 matt "cache %[op], (2*%[n])(%[va2])" "\n\t"
200 1.12 matt "cache %[op], (3*%[n])(%[va1])" "\n\t"
201 1.12 matt "cache %[op], (3*%[n])(%[va2])" "\n\t"
202 1.12 matt "cache %[op], (4*%[n])(%[va1])" "\n\t"
203 1.12 matt "cache %[op], (4*%[n])(%[va2])" "\n\t"
204 1.12 matt "cache %[op], (5*%[n])(%[va1])" "\n\t"
205 1.12 matt "cache %[op], (5*%[n])(%[va2])" "\n\t"
206 1.12 matt "cache %[op], (6*%[n])(%[va1])" "\n\t"
207 1.12 matt "cache %[op], (6*%[n])(%[va2])" "\n\t"
208 1.12 matt "cache %[op], (7*%[n])(%[va1])" "\n\t"
209 1.12 matt "cache %[op], (7*%[n])(%[va2])" "\n\t"
210 1.12 matt "cache %[op], (8*%[n])(%[va1])" "\n\t"
211 1.12 matt "cache %[op], (8*%[n])(%[va2])" "\n\t"
212 1.12 matt "cache %[op], (9*%[n])(%[va1])" "\n\t"
213 1.12 matt "cache %[op], (9*%[n])(%[va2])" "\n\t"
214 1.12 matt "cache %[op], (10*%[n])(%[va1])" "\n\t"
215 1.12 matt "cache %[op], (10*%[n])(%[va2])" "\n\t"
216 1.12 matt "cache %[op], (11*%[n])(%[va1])" "\n\t"
217 1.12 matt "cache %[op], (11*%[n])(%[va2])" "\n\t"
218 1.12 matt "cache %[op], (12*%[n])(%[va1])" "\n\t"
219 1.12 matt "cache %[op], (12*%[n])(%[va2])" "\n\t"
220 1.12 matt "cache %[op], (13*%[n])(%[va1])" "\n\t"
221 1.12 matt "cache %[op], (13*%[n])(%[va2])" "\n\t"
222 1.12 matt "cache %[op], (14*%[n])(%[va1])" "\n\t"
223 1.12 matt "cache %[op], (14*%[n])(%[va2])" "\n\t"
224 1.12 matt "cache %[op], (15*%[n])(%[va1])" "\n\t"
225 1.12 matt "cache %[op], (15*%[n])(%[va2])" "\n\t"
226 1.12 matt ".set pop"
227 1.12 matt :
228 1.12 matt : [va1] "r" (va1), [va2] "r" (va2), [op] "i" (op), [n] "n" (n)
229 1.12 matt : "memory");
230 1.12 matt }
231 1.3 thorpej
/*
 * cache_r4k_op_16lines_16_2way:
 *	Perform the specified cache operation on 16 16-byte cache lines,
 *	2-ways.
 * cache_r4k_op_16lines_32_2way:
 *	Perform the specified cache operation on 16 32-byte cache lines,
 *	2-ways.
 * cache_r4k_op_16lines_64_2way:
 *	Perform the specified cache operation on 16 64-byte cache lines,
 *	2-ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
	cache_r4k_op_16lines_NN_2way(16, (va1), (va2), (op))
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
	cache_r4k_op_16lines_NN_2way(32, (va1), (va2), (op))
#define	cache_r4k_op_16lines_64_2way(va1, va2, op)			\
	cache_r4k_op_16lines_NN_2way(64, (va1), (va2), (op))
244 1.12 matt
245 1.12 matt /*
246 1.12 matt * cache_r4k_op_8lines_NN_4way:
247 1.12 matt * Perform the specified cache operation on 8 n-byte cache lines, 4-ways.
248 1.12 matt */
249 1.12 matt static inline void
250 1.12 matt cache_r4k_op_8lines_NN_4way(size_t n, register_t va1, register_t va2,
251 1.12 matt register_t va3, register_t va4, u_int op)
252 1.12 matt {
253 1.12 matt __asm volatile(
254 1.12 matt ".set push" "\n\t"
255 1.12 matt ".set noreorder" "\n\t"
256 1.12 matt "cache %[op], (0*%[n])(%[va1])" "\n\t"
257 1.12 matt "cache %[op], (0*%[n])(%[va2])" "\n\t"
258 1.12 matt "cache %[op], (0*%[n])(%[va3])" "\n\t"
259 1.12 matt "cache %[op], (0*%[n])(%[va4])" "\n\t"
260 1.12 matt "cache %[op], (1*%[n])(%[va1])" "\n\t"
261 1.12 matt "cache %[op], (1*%[n])(%[va2])" "\n\t"
262 1.12 matt "cache %[op], (1*%[n])(%[va3])" "\n\t"
263 1.12 matt "cache %[op], (1*%[n])(%[va4])" "\n\t"
264 1.12 matt "cache %[op], (2*%[n])(%[va1])" "\n\t"
265 1.12 matt "cache %[op], (2*%[n])(%[va2])" "\n\t"
266 1.12 matt "cache %[op], (2*%[n])(%[va3])" "\n\t"
267 1.12 matt "cache %[op], (2*%[n])(%[va4])" "\n\t"
268 1.12 matt "cache %[op], (3*%[n])(%[va1])" "\n\t"
269 1.12 matt "cache %[op], (3*%[n])(%[va2])" "\n\t"
270 1.12 matt "cache %[op], (3*%[n])(%[va3])" "\n\t"
271 1.12 matt "cache %[op], (3*%[n])(%[va4])" "\n\t"
272 1.12 matt "cache %[op], (4*%[n])(%[va1])" "\n\t"
273 1.12 matt "cache %[op], (4*%[n])(%[va2])" "\n\t"
274 1.12 matt "cache %[op], (4*%[n])(%[va3])" "\n\t"
275 1.12 matt "cache %[op], (4*%[n])(%[va4])" "\n\t"
276 1.12 matt "cache %[op], (5*%[n])(%[va1])" "\n\t"
277 1.12 matt "cache %[op], (5*%[n])(%[va2])" "\n\t"
278 1.12 matt "cache %[op], (5*%[n])(%[va3])" "\n\t"
279 1.12 matt "cache %[op], (5*%[n])(%[va4])" "\n\t"
280 1.12 matt "cache %[op], (6*%[n])(%[va1])" "\n\t"
281 1.12 matt "cache %[op], (6*%[n])(%[va2])" "\n\t"
282 1.12 matt "cache %[op], (6*%[n])(%[va3])" "\n\t"
283 1.12 matt "cache %[op], (6*%[n])(%[va4])" "\n\t"
284 1.12 matt "cache %[op], (7*%[n])(%[va1])" "\n\t"
285 1.12 matt "cache %[op], (7*%[n])(%[va2])" "\n\t"
286 1.12 matt "cache %[op], (7*%[n])(%[va3])" "\n\t"
287 1.12 matt "cache %[op], (7*%[n])(%[va4])" "\n\t"
288 1.12 matt ".set pop"
289 1.12 matt :
290 1.12 matt : [va1] "r" (va1), [va2] "r" (va2),
291 1.12 matt [va3] "r" (va3), [va4] "r" (va4),
292 1.12 matt [op] "i" (op), [n] "n" (n)
293 1.12 matt : "memory");
294 1.12 matt }
/*
 * cache_r4k_op_8lines_16_4way:
 *	Perform the specified cache operation on 8 16-byte cache lines,
 *	4-ways.
 * cache_r4k_op_8lines_32_4way:
 *	Perform the specified cache operation on 8 32-byte cache lines,
 *	4-ways.
 * cache_r4k_op_8lines_64_4way:
 *	Perform the specified cache operation on 8 64-byte cache lines,
 *	4-ways.
 * cache_r4k_op_8lines_128_4way:
 *	Perform the specified cache operation on 8 128-byte cache lines,
 *	4-ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
	cache_r4k_op_8lines_NN_4way(16, (va1), (va2), (va3), (va4), (op))
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
	cache_r4k_op_8lines_NN_4way(32, (va1), (va2), (va3), (va4), (op))
#define	cache_r4k_op_8lines_64_4way(va1, va2, va3, va4, op)		\
	cache_r4k_op_8lines_NN_4way(64, (va1), (va2), (va3), (va4), (op))
#define	cache_r4k_op_8lines_128_4way(va1, va2, va3, va4, op)		\
	cache_r4k_op_8lines_NN_4way(128, (va1), (va2), (va3), (va4), (op))
309 1.7 simonb
/* Primary icache, 16-byte lines. */
void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(register_t, vsize_t);
void	r4k_icache_sync_range_index_16(vaddr_t, vsize_t);

/* Primary icache, 32-byte lines. */
void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(register_t, vsize_t);
void	r4k_icache_sync_range_index_32(vaddr_t, vsize_t);

/* Primary dcache, 16-byte lines. */
void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(register_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_16(register_t, vsize_t);
void	r4k_pdcache_wb_range_16(register_t, vsize_t);

/* Primary dcache, 32-byte lines. */
void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(register_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_32(register_t, vsize_t);
void	r4k_pdcache_wb_range_32(register_t, vsize_t);

/* Secondary cache, 32-byte lines. */
void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(register_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_32(register_t, vsize_t);
void	r4k_sdcache_wb_range_32(register_t, vsize_t);

/* Secondary cache, 128-byte lines. */
void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(register_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_128(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_128(register_t, vsize_t);
void	r4k_sdcache_wb_range_128(register_t, vsize_t);

/* Secondary cache, line size determined at run time. */
void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(register_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_generic(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_generic(register_t, vsize_t);
void	r4k_sdcache_wb_range_generic(register_t, vsize_t);
352 1.12 matt
/* cache_r4k_pcache16.S */

void	cache_r4k_icache_index_inv_16(vaddr_t, vsize_t);
void	cache_r4k_icache_hit_inv_16(register_t, vsize_t);
void	cache_r4k_pdcache_index_wb_inv_16(vaddr_t, vsize_t);
void	cache_r4k_pdcache_hit_inv_16(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_inv_16(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_16(register_t, vsize_t);

/* cache_r4k_pcache32.S */

void	cache_r4k_icache_index_inv_32(vaddr_t, vsize_t);
void	cache_r4k_icache_hit_inv_32(register_t, vsize_t);
void	cache_r4k_pdcache_index_wb_inv_32(vaddr_t, vsize_t);
void	cache_r4k_pdcache_hit_inv_32(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_inv_32(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_32(register_t, vsize_t);

/* cache_r4k_pcache64.S */

void	cache_r4k_icache_index_inv_64(vaddr_t, vsize_t);
void	cache_r4k_icache_hit_inv_64(register_t, vsize_t);
void	cache_r4k_pdcache_index_wb_inv_64(vaddr_t, vsize_t);
void	cache_r4k_pdcache_hit_inv_64(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_inv_64(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_64(register_t, vsize_t);

/* cache_r4k_pcache128.S */

void	cache_r4k_icache_index_inv_128(vaddr_t, vsize_t);
void	cache_r4k_icache_hit_inv_128(register_t, vsize_t);
void	cache_r4k_pdcache_index_wb_inv_128(vaddr_t, vsize_t);
void	cache_r4k_pdcache_hit_inv_128(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_inv_128(register_t, vsize_t);
void	cache_r4k_pdcache_hit_wb_128(register_t, vsize_t);
void	cache_r4k_sdcache_index_wb_inv_128(vaddr_t, vsize_t);
void	cache_r4k_sdcache_hit_inv_128(register_t, vsize_t);
void	cache_r4k_sdcache_hit_wb_inv_128(register_t, vsize_t);
void	cache_r4k_sdcache_hit_wb_128(register_t, vsize_t);
392 1.13 skrll
393 1.9 simonb #endif /* !_LOCORE */
394