/*	$NetBSD: cache_r4k.c,v 1.7 2002/11/07 23:03:21 cgd Exp $	*/
2 1.2 thorpej
3 1.2 thorpej /*
4 1.2 thorpej * Copyright 2001 Wasabi Systems, Inc.
5 1.2 thorpej * All rights reserved.
6 1.2 thorpej *
7 1.2 thorpej * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 1.2 thorpej *
9 1.2 thorpej * Redistribution and use in source and binary forms, with or without
10 1.2 thorpej * modification, are permitted provided that the following conditions
11 1.2 thorpej * are met:
12 1.2 thorpej * 1. Redistributions of source code must retain the above copyright
13 1.2 thorpej * notice, this list of conditions and the following disclaimer.
14 1.2 thorpej * 2. Redistributions in binary form must reproduce the above copyright
15 1.2 thorpej * notice, this list of conditions and the following disclaimer in the
16 1.2 thorpej * documentation and/or other materials provided with the distribution.
17 1.2 thorpej * 3. All advertising materials mentioning features or use of this software
18 1.2 thorpej * must display the following acknowledgement:
19 1.2 thorpej * This product includes software developed for the NetBSD Project by
20 1.2 thorpej * Wasabi Systems, Inc.
21 1.2 thorpej * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 1.2 thorpej * or promote products derived from this software without specific prior
23 1.2 thorpej * written permission.
24 1.2 thorpej *
25 1.2 thorpej * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 1.2 thorpej * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.2 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.2 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 1.2 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.2 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.2 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.2 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.2 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.2 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.2 thorpej * POSSIBILITY OF SUCH DAMAGE.
36 1.2 thorpej */
37 1.2 thorpej
38 1.2 thorpej #include <sys/param.h>
39 1.2 thorpej
40 1.2 thorpej #include <mips/cache.h>
41 1.2 thorpej #include <mips/cache_r4k.h>
42 1.2 thorpej
43 1.2 thorpej /*
44 1.2 thorpej * Cache operations for R4000/R4400-style caches:
45 1.2 thorpej *
46 1.2 thorpej * - Direct-mapped
47 1.2 thorpej * - Write-back
48 1.2 thorpej * - Virtually indexed, physically tagged
49 1.2 thorpej *
50 1.2 thorpej * XXX Does not handle split secondary caches.
51 1.2 thorpej */
52 1.2 thorpej
53 1.2 thorpej #define round_line(x) (((x) + 15) & ~15)
54 1.2 thorpej #define trunc_line(x) ((x) & ~15)
55 1.2 thorpej
56 1.2 thorpej __asm(".set mips3");
57 1.2 thorpej
58 1.2 thorpej void
59 1.2 thorpej r4k_icache_sync_all_16(void)
60 1.2 thorpej {
61 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
62 1.2 thorpej vaddr_t eva = va + mips_picache_size;
63 1.2 thorpej
64 1.2 thorpej mips_dcache_wbinv_all();
65 1.2 thorpej
66 1.2 thorpej __asm __volatile("sync");
67 1.2 thorpej
68 1.2 thorpej while (va < eva) {
69 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
70 1.2 thorpej va += (32 * 16);
71 1.2 thorpej }
72 1.2 thorpej }
73 1.2 thorpej
74 1.2 thorpej void
75 1.2 thorpej r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
76 1.2 thorpej {
77 1.2 thorpej vaddr_t eva = round_line(va + size);
78 1.2 thorpej
79 1.2 thorpej va = trunc_line(va);
80 1.2 thorpej
81 1.2 thorpej mips_dcache_wb_range(va, (eva - va));
82 1.2 thorpej
83 1.2 thorpej __asm __volatile("sync");
84 1.2 thorpej
85 1.2 thorpej while ((eva - va) >= (32 * 16)) {
86 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
87 1.2 thorpej va += (32 * 16);
88 1.2 thorpej }
89 1.2 thorpej
90 1.2 thorpej while (va < eva) {
91 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
92 1.2 thorpej va += 16;
93 1.2 thorpej }
94 1.2 thorpej }
95 1.2 thorpej
96 1.2 thorpej void
97 1.2 thorpej r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
98 1.2 thorpej {
99 1.7 cgd vaddr_t eva, orig_va;
100 1.7 cgd
101 1.7 cgd orig_va = va;
102 1.2 thorpej
103 1.2 thorpej eva = round_line(va + size);
104 1.2 thorpej va = trunc_line(va);
105 1.2 thorpej
106 1.2 thorpej mips_dcache_wbinv_range_index(va, (eva - va));
107 1.2 thorpej
108 1.2 thorpej __asm __volatile("sync");
109 1.2 thorpej
110 1.2 thorpej /*
111 1.2 thorpej * Since we're doing Index ops, we expect to not be able
112 1.2 thorpej * to access the address we've been given. So, get the
113 1.2 thorpej * bits that determine the cache index, and make a KSEG0
114 1.2 thorpej * address out of them.
115 1.2 thorpej */
116 1.7 cgd va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);
117 1.2 thorpej
118 1.2 thorpej eva = round_line(va + size);
119 1.2 thorpej va = trunc_line(va);
120 1.2 thorpej
121 1.2 thorpej while ((eva - va) >= (32 * 16)) {
122 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
123 1.2 thorpej va += (32 * 16);
124 1.2 thorpej }
125 1.2 thorpej
126 1.2 thorpej while (va < eva) {
127 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
128 1.2 thorpej va += 16;
129 1.2 thorpej }
130 1.2 thorpej }
131 1.2 thorpej
132 1.2 thorpej void
133 1.2 thorpej r4k_pdcache_wbinv_all_16(void)
134 1.2 thorpej {
135 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
136 1.2 thorpej vaddr_t eva = va + mips_pdcache_size;
137 1.2 thorpej
138 1.2 thorpej while (va < eva) {
139 1.2 thorpej cache_r4k_op_32lines_16(va,
140 1.2 thorpej CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
141 1.2 thorpej va += (32 * 16);
142 1.2 thorpej }
143 1.2 thorpej }
144 1.2 thorpej
145 1.2 thorpej void
146 1.2 thorpej r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
147 1.2 thorpej {
148 1.2 thorpej vaddr_t eva = round_line(va + size);
149 1.2 thorpej
150 1.2 thorpej va = trunc_line(va);
151 1.2 thorpej
152 1.2 thorpej while ((eva - va) >= (32 * 16)) {
153 1.2 thorpej cache_r4k_op_32lines_16(va,
154 1.2 thorpej CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
155 1.2 thorpej va += (32 * 16);
156 1.2 thorpej }
157 1.2 thorpej
158 1.2 thorpej while (va < eva) {
159 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
160 1.2 thorpej va += 16;
161 1.2 thorpej }
162 1.2 thorpej }
163 1.2 thorpej
164 1.2 thorpej void
165 1.2 thorpej r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
166 1.2 thorpej {
167 1.2 thorpej vaddr_t eva;
168 1.2 thorpej
169 1.2 thorpej /*
170 1.2 thorpej * Since we're doing Index ops, we expect to not be able
171 1.2 thorpej * to access the address we've been given. So, get the
172 1.2 thorpej * bits that determine the cache index, and make a KSEG0
173 1.2 thorpej * address out of them.
174 1.2 thorpej */
175 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));
176 1.2 thorpej
177 1.2 thorpej eva = round_line(va + size);
178 1.2 thorpej va = trunc_line(va);
179 1.2 thorpej
180 1.2 thorpej while ((eva - va) >= (32 * 16)) {
181 1.2 thorpej cache_r4k_op_32lines_16(va,
182 1.2 thorpej CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
183 1.2 thorpej va += (32 * 16);
184 1.2 thorpej }
185 1.2 thorpej
186 1.2 thorpej while (va < eva) {
187 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
188 1.2 thorpej va += 16;
189 1.2 thorpej }
190 1.2 thorpej }
191 1.2 thorpej
192 1.2 thorpej void
193 1.2 thorpej r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
194 1.2 thorpej {
195 1.2 thorpej vaddr_t eva = round_line(va + size);
196 1.2 thorpej
197 1.2 thorpej va = trunc_line(va);
198 1.2 thorpej
199 1.2 thorpej while ((eva - va) >= (32 * 16)) {
200 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
201 1.2 thorpej va += (32 * 16);
202 1.2 thorpej }
203 1.2 thorpej
204 1.2 thorpej while (va < eva) {
205 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
206 1.2 thorpej va += 16;
207 1.2 thorpej }
208 1.2 thorpej }
209 1.2 thorpej
210 1.2 thorpej void
211 1.2 thorpej r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
212 1.2 thorpej {
213 1.2 thorpej vaddr_t eva = round_line(va + size);
214 1.2 thorpej
215 1.2 thorpej va = trunc_line(va);
216 1.2 thorpej
217 1.2 thorpej while ((eva - va) >= (32 * 16)) {
218 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
219 1.2 thorpej va += (32 * 16);
220 1.2 thorpej }
221 1.2 thorpej
222 1.2 thorpej while (va < eva) {
223 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
224 1.2 thorpej va += 16;
225 1.2 thorpej }
226 1.2 thorpej }
227 1.2 thorpej
228 1.2 thorpej #undef round_line
229 1.2 thorpej #undef trunc_line
230 1.2 thorpej
231 1.2 thorpej #define round_line(x) (((x) + 31) & ~31)
232 1.2 thorpej #define trunc_line(x) ((x) & ~31)
233 1.6 tsutsui
234 1.6 tsutsui void
235 1.6 tsutsui r4k_icache_sync_all_32(void)
236 1.6 tsutsui {
237 1.6 tsutsui vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
238 1.6 tsutsui vaddr_t eva = va + mips_picache_size;
239 1.6 tsutsui
240 1.6 tsutsui mips_dcache_wbinv_all();
241 1.6 tsutsui
242 1.6 tsutsui __asm __volatile("sync");
243 1.6 tsutsui
244 1.6 tsutsui while (va < eva) {
245 1.6 tsutsui cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
246 1.6 tsutsui va += (32 * 32);
247 1.6 tsutsui }
248 1.6 tsutsui }
249 1.6 tsutsui
250 1.6 tsutsui void
251 1.6 tsutsui r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
252 1.6 tsutsui {
253 1.6 tsutsui vaddr_t eva = round_line(va + size);
254 1.6 tsutsui
255 1.6 tsutsui va = trunc_line(va);
256 1.6 tsutsui
257 1.6 tsutsui mips_dcache_wb_range(va, (eva - va));
258 1.6 tsutsui
259 1.6 tsutsui __asm __volatile("sync");
260 1.6 tsutsui
261 1.6 tsutsui while ((eva - va) >= (32 * 32)) {
262 1.6 tsutsui cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
263 1.6 tsutsui va += (32 * 32);
264 1.6 tsutsui }
265 1.6 tsutsui
266 1.6 tsutsui while (va < eva) {
267 1.6 tsutsui cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
268 1.6 tsutsui va += 32;
269 1.6 tsutsui }
270 1.6 tsutsui }
271 1.6 tsutsui
272 1.6 tsutsui void
273 1.6 tsutsui r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
274 1.6 tsutsui {
275 1.6 tsutsui vaddr_t eva;
276 1.6 tsutsui
277 1.6 tsutsui eva = round_line(va + size);
278 1.6 tsutsui va = trunc_line(va);
279 1.6 tsutsui
280 1.6 tsutsui mips_dcache_wbinv_range_index(va, (eva - va));
281 1.6 tsutsui
282 1.6 tsutsui __asm __volatile("sync");
283 1.6 tsutsui
284 1.6 tsutsui /*
285 1.6 tsutsui * Since we're doing Index ops, we expect to not be able
286 1.6 tsutsui * to access the address we've been given. So, get the
287 1.6 tsutsui * bits that determine the cache index, and make a KSEG0
288 1.6 tsutsui * address out of them.
289 1.6 tsutsui */
290 1.6 tsutsui va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
291 1.6 tsutsui
292 1.6 tsutsui eva = round_line(va + size);
293 1.6 tsutsui va = trunc_line(va);
294 1.6 tsutsui
295 1.6 tsutsui while ((eva - va) >= (32 * 32)) {
296 1.6 tsutsui cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
297 1.6 tsutsui va += (32 * 32);
298 1.6 tsutsui }
299 1.6 tsutsui
300 1.6 tsutsui while (va < eva) {
301 1.6 tsutsui cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
302 1.6 tsutsui va += 32;
303 1.6 tsutsui }
304 1.6 tsutsui }
305 1.6 tsutsui
306 1.6 tsutsui void
307 1.6 tsutsui r4k_pdcache_wbinv_all_32(void)
308 1.6 tsutsui {
309 1.6 tsutsui vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
310 1.6 tsutsui vaddr_t eva = va + mips_pdcache_size;
311 1.6 tsutsui
312 1.6 tsutsui while (va < eva) {
313 1.6 tsutsui cache_r4k_op_32lines_32(va,
314 1.6 tsutsui CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
315 1.6 tsutsui va += (32 * 32);
316 1.6 tsutsui }
317 1.6 tsutsui }
318 1.6 tsutsui
319 1.6 tsutsui void
320 1.6 tsutsui r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
321 1.6 tsutsui {
322 1.6 tsutsui vaddr_t eva = round_line(va + size);
323 1.6 tsutsui
324 1.6 tsutsui va = trunc_line(va);
325 1.6 tsutsui
326 1.6 tsutsui while ((eva - va) >= (32 * 32)) {
327 1.6 tsutsui cache_r4k_op_32lines_32(va,
328 1.6 tsutsui CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
329 1.6 tsutsui va += (32 * 32);
330 1.6 tsutsui }
331 1.6 tsutsui
332 1.6 tsutsui while (va < eva) {
333 1.6 tsutsui cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
334 1.6 tsutsui va += 32;
335 1.6 tsutsui }
336 1.6 tsutsui }
337 1.6 tsutsui
338 1.6 tsutsui void
339 1.6 tsutsui r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
340 1.6 tsutsui {
341 1.6 tsutsui vaddr_t eva;
342 1.6 tsutsui
343 1.6 tsutsui /*
344 1.6 tsutsui * Since we're doing Index ops, we expect to not be able
345 1.6 tsutsui * to access the address we've been given. So, get the
346 1.6 tsutsui * bits that determine the cache index, and make a KSEG0
347 1.6 tsutsui * address out of them.
348 1.6 tsutsui */
349 1.6 tsutsui va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));
350 1.6 tsutsui
351 1.6 tsutsui eva = round_line(va + size);
352 1.6 tsutsui va = trunc_line(va);
353 1.6 tsutsui
354 1.6 tsutsui while ((eva - va) >= (32 * 32)) {
355 1.6 tsutsui cache_r4k_op_32lines_32(va,
356 1.6 tsutsui CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
357 1.6 tsutsui va += (32 * 32);
358 1.6 tsutsui }
359 1.6 tsutsui
360 1.6 tsutsui while (va < eva) {
361 1.6 tsutsui cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
362 1.6 tsutsui va += 32;
363 1.6 tsutsui }
364 1.6 tsutsui }
365 1.6 tsutsui
366 1.6 tsutsui void
367 1.6 tsutsui r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
368 1.6 tsutsui {
369 1.6 tsutsui vaddr_t eva = round_line(va + size);
370 1.6 tsutsui
371 1.6 tsutsui va = trunc_line(va);
372 1.6 tsutsui
373 1.6 tsutsui while ((eva - va) >= (32 * 32)) {
374 1.6 tsutsui cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
375 1.6 tsutsui va += (32 * 32);
376 1.6 tsutsui }
377 1.6 tsutsui
378 1.6 tsutsui while (va < eva) {
379 1.6 tsutsui cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
380 1.6 tsutsui va += 32;
381 1.6 tsutsui }
382 1.6 tsutsui }
383 1.6 tsutsui
384 1.6 tsutsui void
385 1.6 tsutsui r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
386 1.6 tsutsui {
387 1.6 tsutsui vaddr_t eva = round_line(va + size);
388 1.6 tsutsui
389 1.6 tsutsui va = trunc_line(va);
390 1.6 tsutsui
391 1.6 tsutsui while ((eva - va) >= (32 * 32)) {
392 1.6 tsutsui cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
393 1.6 tsutsui va += (32 * 32);
394 1.6 tsutsui }
395 1.6 tsutsui
396 1.6 tsutsui while (va < eva) {
397 1.6 tsutsui cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
398 1.6 tsutsui va += 32;
399 1.6 tsutsui }
400 1.6 tsutsui }
401 1.2 thorpej
402 1.2 thorpej void
403 1.2 thorpej r4k_sdcache_wbinv_all_32(void)
404 1.2 thorpej {
405 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
406 1.2 thorpej vaddr_t eva = va + mips_sdcache_size;
407 1.2 thorpej
408 1.2 thorpej while (va < eva) {
409 1.2 thorpej cache_r4k_op_32lines_32(va,
410 1.2 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
411 1.2 thorpej va += (32 * 32);
412 1.2 thorpej }
413 1.2 thorpej }
414 1.2 thorpej
415 1.2 thorpej void
416 1.2 thorpej r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
417 1.2 thorpej {
418 1.2 thorpej vaddr_t eva = round_line(va + size);
419 1.2 thorpej
420 1.2 thorpej va = trunc_line(va);
421 1.2 thorpej
422 1.2 thorpej while ((eva - va) >= (32 * 32)) {
423 1.2 thorpej cache_r4k_op_32lines_32(va,
424 1.2 thorpej CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
425 1.2 thorpej va += (32 * 32);
426 1.2 thorpej }
427 1.2 thorpej
428 1.2 thorpej while (va < eva) {
429 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
430 1.2 thorpej va += 32;
431 1.2 thorpej }
432 1.2 thorpej }
433 1.2 thorpej
434 1.2 thorpej void
435 1.2 thorpej r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
436 1.2 thorpej {
437 1.2 thorpej vaddr_t eva;
438 1.2 thorpej
439 1.2 thorpej /*
440 1.2 thorpej * Since we're doing Index ops, we expect to not be able
441 1.2 thorpej * to access the address we've been given. So, get the
442 1.2 thorpej * bits that determine the cache index, and make a KSEG0
443 1.2 thorpej * address out of them.
444 1.2 thorpej */
445 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
446 1.2 thorpej
447 1.2 thorpej eva = round_line(va + size);
448 1.2 thorpej va = trunc_line(va);
449 1.2 thorpej
450 1.2 thorpej while ((eva - va) >= (32 * 32)) {
451 1.4 thorpej cache_r4k_op_32lines_32(va,
452 1.2 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
453 1.2 thorpej va += (32 * 32);
454 1.2 thorpej }
455 1.2 thorpej
456 1.2 thorpej while (va < eva) {
457 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
458 1.2 thorpej va += 32;
459 1.2 thorpej }
460 1.2 thorpej }
461 1.2 thorpej
462 1.2 thorpej void
463 1.2 thorpej r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
464 1.2 thorpej {
465 1.2 thorpej vaddr_t eva = round_line(va + size);
466 1.2 thorpej
467 1.2 thorpej va = trunc_line(va);
468 1.2 thorpej
469 1.2 thorpej while ((eva - va) >= (32 * 32)) {
470 1.2 thorpej cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
471 1.2 thorpej va += (32 * 32);
472 1.2 thorpej }
473 1.2 thorpej
474 1.2 thorpej while (va < eva) {
475 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
476 1.2 thorpej va += 32;
477 1.2 thorpej }
478 1.2 thorpej }
479 1.2 thorpej
480 1.2 thorpej void
481 1.2 thorpej r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
482 1.2 thorpej {
483 1.2 thorpej vaddr_t eva = round_line(va + size);
484 1.2 thorpej
485 1.2 thorpej va = trunc_line(va);
486 1.2 thorpej
487 1.2 thorpej while ((eva - va) >= (32 * 32)) {
488 1.2 thorpej cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
489 1.2 thorpej va += (32 * 32);
490 1.2 thorpej }
491 1.2 thorpej
492 1.2 thorpej while (va < eva) {
493 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
494 1.2 thorpej va += 32;
495 1.3 thorpej }
496 1.3 thorpej }
497 1.3 thorpej
498 1.3 thorpej #undef round_line
499 1.3 thorpej #undef trunc_line
500 1.3 thorpej
501 1.3 thorpej #define round_line(x) (((x) + 127) & ~127)
502 1.3 thorpej #define trunc_line(x) ((x) & ~127)
503 1.3 thorpej
504 1.3 thorpej void
505 1.3 thorpej r4k_sdcache_wbinv_all_128(void)
506 1.3 thorpej {
507 1.3 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
508 1.3 thorpej vaddr_t eva = va + mips_sdcache_size;
509 1.3 thorpej
510 1.3 thorpej while (va < eva) {
511 1.3 thorpej cache_r4k_op_32lines_128(va,
512 1.3 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
513 1.3 thorpej va += (32 * 128);
514 1.3 thorpej }
515 1.3 thorpej }
516 1.3 thorpej
517 1.3 thorpej void
518 1.3 thorpej r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
519 1.3 thorpej {
520 1.3 thorpej vaddr_t eva = round_line(va + size);
521 1.3 thorpej
522 1.3 thorpej va = trunc_line(va);
523 1.3 thorpej
524 1.3 thorpej while ((eva - va) >= (32 * 128)) {
525 1.3 thorpej cache_r4k_op_32lines_128(va,
526 1.3 thorpej CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
527 1.3 thorpej va += (32 * 128);
528 1.3 thorpej }
529 1.3 thorpej
530 1.3 thorpej while (va < eva) {
531 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
532 1.3 thorpej va += 128;
533 1.3 thorpej }
534 1.3 thorpej }
535 1.3 thorpej
536 1.3 thorpej void
537 1.3 thorpej r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
538 1.3 thorpej {
539 1.3 thorpej vaddr_t eva;
540 1.3 thorpej
541 1.3 thorpej /*
542 1.3 thorpej * Since we're doing Index ops, we expect to not be able
543 1.3 thorpej * to access the address we've been given. So, get the
544 1.3 thorpej * bits that determine the cache index, and make a KSEG0
545 1.3 thorpej * address out of them.
546 1.3 thorpej */
547 1.3 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
548 1.3 thorpej
549 1.3 thorpej eva = round_line(va + size);
550 1.3 thorpej va = trunc_line(va);
551 1.3 thorpej
552 1.3 thorpej while ((eva - va) >= (32 * 128)) {
553 1.3 thorpej cache_r4k_op_32lines_128(va,
554 1.3 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
555 1.3 thorpej va += (32 * 128);
556 1.3 thorpej }
557 1.3 thorpej
558 1.3 thorpej while (va < eva) {
559 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
560 1.3 thorpej va += 128;
561 1.3 thorpej }
562 1.3 thorpej }
563 1.3 thorpej
564 1.3 thorpej void
565 1.3 thorpej r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
566 1.3 thorpej {
567 1.3 thorpej vaddr_t eva = round_line(va + size);
568 1.3 thorpej
569 1.3 thorpej va = trunc_line(va);
570 1.3 thorpej
571 1.3 thorpej while ((eva - va) >= (32 * 128)) {
572 1.3 thorpej cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
573 1.3 thorpej va += (32 * 128);
574 1.3 thorpej }
575 1.3 thorpej
576 1.3 thorpej while (va < eva) {
577 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
578 1.3 thorpej va += 128;
579 1.3 thorpej }
580 1.3 thorpej }
581 1.3 thorpej
582 1.3 thorpej void
583 1.3 thorpej r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
584 1.3 thorpej {
585 1.3 thorpej vaddr_t eva = round_line(va + size);
586 1.3 thorpej
587 1.3 thorpej va = trunc_line(va);
588 1.3 thorpej
589 1.3 thorpej while ((eva - va) >= (32 * 128)) {
590 1.3 thorpej cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
591 1.3 thorpej va += (32 * 128);
592 1.3 thorpej }
593 1.3 thorpej
594 1.3 thorpej while (va < eva) {
595 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
596 1.3 thorpej va += 128;
597 1.2 thorpej }
598 1.2 thorpej }
599 1.2 thorpej
600 1.2 thorpej #undef round_line
601 1.2 thorpej #undef trunc_line
602 1.2 thorpej
603 1.2 thorpej #define round_line(x) (((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
604 1.2 thorpej #define trunc_line(x) ((x) & ~(mips_sdcache_line_size - 1))
605 1.2 thorpej
606 1.2 thorpej void
607 1.2 thorpej r4k_sdcache_wbinv_all_generic(void)
608 1.2 thorpej {
609 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
610 1.2 thorpej vaddr_t eva = va + mips_sdcache_size;
611 1.5 shin int line_size = mips_sdcache_line_size;
612 1.2 thorpej
613 1.2 thorpej while (va < eva) {
614 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
615 1.5 shin va += line_size;
616 1.2 thorpej }
617 1.2 thorpej }
618 1.2 thorpej
619 1.2 thorpej void
620 1.2 thorpej r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
621 1.2 thorpej {
622 1.2 thorpej vaddr_t eva = round_line(va + size);
623 1.5 shin int line_size = mips_sdcache_line_size;
624 1.2 thorpej
625 1.2 thorpej va = trunc_line(va);
626 1.2 thorpej
627 1.2 thorpej while (va < eva) {
628 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
629 1.5 shin va += line_size;
630 1.2 thorpej }
631 1.2 thorpej }
632 1.2 thorpej
633 1.2 thorpej void
634 1.2 thorpej r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
635 1.2 thorpej {
636 1.2 thorpej vaddr_t eva;
637 1.5 shin int line_size = mips_sdcache_line_size;
638 1.2 thorpej
639 1.2 thorpej /*
640 1.2 thorpej * Since we're doing Index ops, we expect to not be able
641 1.2 thorpej * to access the address we've been given. So, get the
642 1.2 thorpej * bits that determine the cache index, and make a KSEG0
643 1.2 thorpej * address out of them.
644 1.2 thorpej */
645 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
646 1.2 thorpej
647 1.2 thorpej eva = round_line(va + size);
648 1.2 thorpej va = trunc_line(va);
649 1.2 thorpej
650 1.2 thorpej while (va < eva) {
651 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
652 1.5 shin va += line_size;
653 1.2 thorpej }
654 1.2 thorpej }
655 1.2 thorpej
656 1.2 thorpej void
657 1.2 thorpej r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
658 1.2 thorpej {
659 1.2 thorpej vaddr_t eva = round_line(va + size);
660 1.5 shin int line_size = mips_sdcache_line_size;
661 1.2 thorpej
662 1.2 thorpej va = trunc_line(va);
663 1.2 thorpej
664 1.2 thorpej while (va < eva) {
665 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
666 1.5 shin va += line_size;
667 1.2 thorpej }
668 1.2 thorpej }
669 1.2 thorpej
670 1.2 thorpej void
671 1.2 thorpej r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
672 1.2 thorpej {
673 1.2 thorpej vaddr_t eva = round_line(va + size);
674 1.5 shin int line_size = mips_sdcache_line_size;
675 1.2 thorpej
676 1.2 thorpej va = trunc_line(va);
677 1.2 thorpej
678 1.2 thorpej while (va < eva) {
679 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
680 1.5 shin va += line_size;
681 1.2 thorpej }
682 1.2 thorpej }
683 1.2 thorpej
684 1.2 thorpej #undef round_line
685 1.2 thorpej #undef trunc_line
686